author     Bjorn Andersson <bjorn.andersson@linaro.org>  2018-06-16 23:36:09 -0700
committer  Bjorn Andersson <bjorn.andersson@linaro.org>  2018-06-16 23:36:09 -0700
commit     3c5a6f65218ca6092d8241f273b11a54d3dfb0c6 (patch)
tree       ccea68cd073b433671078fe0ff6c1ea6e8c89ba9 /rr-cache
parent     742625615f10a1c7e7c4ab3110b02bc295243a14 (diff)
New rr-cache entries from ci-merge
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
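
These entries are git rerere records: each directory under rr-cache/ is named after the hash of a merge conflict and holds the conflicted hunk state (preimage), the recorded resolution (postimage), and in some cases the working-tree side of the in-progress merge (thisimage). A minimal sketch of how such records are produced in a merge checkout (the branch name and <hash> are placeholders):

    git config rerere.enabled true
    git merge topic-branch   # a conflict writes .git/rr-cache/<hash>/preimage
    # ...resolve the conflict by hand...
    git commit               # the resolution is saved as .git/rr-cache/<hash>/postimage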
Diffstat (limited to 'rr-cache')
-rw-r--r--  rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage  8142
-rw-r--r--  rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage.1  8142
-rw-r--r--  rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/postimage  736
-rw-r--r--  rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/preimage  777
-rw-r--r--  rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/postimage  9
-rw-r--r--  rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/preimage  13
-rw-r--r--  rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/thisimage  13
-rw-r--r--  rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/postimage  24
-rw-r--r--  rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/preimage  36
-rw-r--r--  rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/thisimage  36
-rw-r--r--  rr-cache/0f0d636541673b35ab6e901db0d60610f0cf1c49/preimage  2857
-rw-r--r--  rr-cache/0f0ead2f874d2ed7910c132b40bbe82ac6705cc9/preimage  268
-rw-r--r--  rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/postimage.1  470
-rw-r--r--  rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage  510
-rw-r--r--  rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage.1  510
-rw-r--r--  rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/thisimage.1  501
-rw-r--r--  rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/postimage.2  22
-rw-r--r--  rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage  31
-rw-r--r--  rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.1  31
-rw-r--r--  rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.2  31
-rw-r--r--  rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/thisimage.2  31
-rw-r--r--  rr-cache/17b1ce0c1269b821be9f793e477f850010223173/postimage  131
-rw-r--r--  rr-cache/17b1ce0c1269b821be9f793e477f850010223173/preimage  195
-rw-r--r--  rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/postimage  27
-rw-r--r--  rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/preimage  43
-rw-r--r--  rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/thisimage  43
-rw-r--r--  rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/postimage  291
-rw-r--r--  rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/preimage  295
-rw-r--r--  rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/thisimage  295
-rw-r--r--  rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage  440
-rw-r--r--  rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.1  440
-rw-r--r--  rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.2  440
-rw-r--r--  rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.3  440
-rw-r--r--  rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/postimage  1570
-rw-r--r--  rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/preimage  1653
-rw-r--r--  rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/postimage  33
-rw-r--r--  rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/preimage  66
-rw-r--r--  rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/thisimage  66
-rw-r--r--  rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/postimage  221
-rw-r--r--  rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/preimage  223
-rw-r--r--  rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/thisimage  223
-rw-r--r--  rr-cache/2c69692917e377a3670a7f5157224e007a435365/postimage  522
-rw-r--r--  rr-cache/2c69692917e377a3670a7f5157224e007a435365/preimage  616
-rw-r--r--  rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage  669
-rw-r--r--  rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage.1  669
-rw-r--r--  rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/postimage  18
-rw-r--r--  rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/preimage  21
-rw-r--r--  rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/thisimage  21
-rw-r--r--  rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/postimage  757
-rw-r--r--  rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/preimage  801
-rw-r--r--  rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/thisimage  801
-rw-r--r--  rr-cache/321216f1938295007a308be9906b0a7e68880687/postimage  51
-rw-r--r--  rr-cache/321216f1938295007a308be9906b0a7e68880687/preimage  104
-rw-r--r--  rr-cache/321216f1938295007a308be9906b0a7e68880687/thisimage  104
-rw-r--r--  rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/postimage  156
-rw-r--r--  rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/preimage  159
-rw-r--r--  rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/thisimage  159
-rw-r--r--  rr-cache/3b5769cf96e6c1ad9c66e9a9eca1adef3f180dde/preimage  43
-rw-r--r--  rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/postimage.1  123
-rw-r--r--  rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage  127
-rw-r--r--  rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage.1  127
-rw-r--r--  rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/thisimage.1  127
-rw-r--r--  rr-cache/3b8d5ca2ef10d1975b8a81563bf7cbcead9923d8/preimage  1039
-rw-r--r--  rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage  1051
-rw-r--r--  rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.1  1051
-rw-r--r--  rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.2  1051
-rw-r--r--  rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage  3988
-rw-r--r--  rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.1  3988
-rw-r--r--  rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.2  3988
-rw-r--r--  rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.3  3988
-rw-r--r--  rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/postimage  66
-rw-r--r--  rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/preimage  126
-rw-r--r--  rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/thisimage  126
-rw-r--r--  rr-cache/45c71f5f3b20c146e79944c1febc8c47e83e82c1/preimage  634
-rw-r--r--  rr-cache/4a8797e491ea33c914db6a50118dd923972b062c/preimage  28
-rw-r--r--  rr-cache/4dc2a85bc024568d1d44bbde78440fbdd72486b9/preimage  1891
-rw-r--r--  rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/postimage  84
-rw-r--r--  rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/preimage  137
-rw-r--r--  rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/thisimage  137
-rw-r--r--  rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/postimage  28
-rw-r--r--  rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/preimage  40
-rw-r--r--  rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/thisimage  40
-rw-r--r--  rr-cache/511ee021ca21c4482bc2820278820cb282866ece/preimage  13
-rw-r--r--  rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/postimage  255
-rw-r--r--  rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/preimage  403
-rw-r--r--  rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/thisimage  403
-rw-r--r--  rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/postimage  1351
-rw-r--r--  rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/preimage  1434
-rw-r--r--  rr-cache/56cd0ae6607c5a99bf05b02c665b3767b0c05110/preimage  281
-rw-r--r--  rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/postimage  1224
-rw-r--r--  rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/preimage  1228
-rw-r--r--  rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage  195
-rw-r--r--  rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage.1  195
-rw-r--r--  rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage  1205
-rw-r--r--  rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.1  1205
-rw-r--r--  rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.2  1204
-rw-r--r--  rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.3  1205
-rw-r--r--  rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/postimage  8
-rw-r--r--  rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/preimage  15
-rw-r--r--  rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/thisimage  15
-rw-r--r--  rr-cache/60b96944c00d3e22b751173a6dc3d18c9a24b7ee/preimage  1341
-rw-r--r--  rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/postimage  380
-rw-r--r--  rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/preimage  554
-rw-r--r--  rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/thisimage  554
-rw-r--r--  rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/postimage  23
-rw-r--r--  rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/preimage  33
-rw-r--r--  rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/thisimage  33
-rw-r--r--  rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage  278
-rw-r--r--  rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage.1  278
-rw-r--r--  rr-cache/666e8cdd499373d4f426f6a9c73ff840683446b7/preimage  943
-rw-r--r--  rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/postimage.1  1771
-rw-r--r--  rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage  1777
-rw-r--r--  rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage.1  1777
-rw-r--r--  rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/thisimage.1  1777
-rw-r--r--  rr-cache/73eed3cfc6daf7318b95fa21c8447b3b17437276/thisimage  184
-rw-r--r--  rr-cache/7801da766901361f856c697332a300f85bfc6d93/postimage  22
-rw-r--r--  rr-cache/7801da766901361f856c697332a300f85bfc6d93/preimage  35
-rw-r--r--  rr-cache/7801da766901361f856c697332a300f85bfc6d93/thisimage  35
-rw-r--r--  rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/postimage  109
-rw-r--r--  rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/preimage  159
-rw-r--r--  rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/thisimage  159
-rw-r--r--  rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/postimage.1  93
-rw-r--r--  rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage  104
-rw-r--r--  rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage.1  104
-rw-r--r--  rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/thisimage.1  104
-rw-r--r--  rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/postimage  92
-rw-r--r--  rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/preimage  134
-rw-r--r--  rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/thisimage  134
-rw-r--r--  rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/postimage  1422
-rw-r--r--  rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/preimage  1505
-rw-r--r--  rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/postimage  624
-rw-r--r--  rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/preimage  953
-rw-r--r--  rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/thisimage  953
-rw-r--r--  rr-cache/a6df22f29fac3dfcfddaa23d13d678c7fe51ce0c/preimage  513
-rw-r--r--  rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/postimage.1  117
-rw-r--r--  rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage  117
-rw-r--r--  rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage.1  117
-rw-r--r--  rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/thisimage.1  117
-rw-r--r--  rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/postimage  15
-rw-r--r--  rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/preimage  24
-rw-r--r--  rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/thisimage  24
-rw-r--r--  rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/postimage  172
-rw-r--r--  rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/preimage  208
-rw-r--r--  rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/thisimage  208
-rw-r--r--  rr-cache/ad97e59e50cce01d2ea5e80599105a27ec6fafea/preimage  1261
-rw-r--r--  rr-cache/aeafc43e3cc81632ac1fba082712d9e41209fb06/preimage  92
-rw-r--r--  rr-cache/b99949bc1ff147a83e2e82fb563d2bcdc8be2c85/preimage  1737
-rw-r--r--  rr-cache/bd9fcd8e64f89f7d4c90ad64ee8bc94d4b3a7b0e/preimage  804
-rw-r--r--  rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/postimage  1299
-rw-r--r--  rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/preimage  1328
-rw-r--r--  rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/thisimage  1328
-rw-r--r--  rr-cache/c1e82d583f89c73ff0394c90cc67545bc41ca1e1/preimage  399
-rw-r--r--  rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage  48
-rw-r--r--  rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.1  48
-rw-r--r--  rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.2  47
-rw-r--r--  rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.3  49
-rw-r--r--  rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/postimage  712
-rw-r--r--  rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/preimage  715
-rw-r--r--  rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/thisimage  715
-rw-r--r--  rr-cache/c9867e004c11ba39a2594258c2956921203f991b/postimage  1399
-rw-r--r--  rr-cache/c9867e004c11ba39a2594258c2956921203f991b/preimage  2820
-rw-r--r--  rr-cache/c9867e004c11ba39a2594258c2956921203f991b/thisimage  2820
-rw-r--r--  rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/postimage  211
-rw-r--r--  rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/preimage  246
-rw-r--r--  rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/thisimage  246
-rw-r--r--  rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/postimage  1006
-rw-r--r--  rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/preimage  1583
-rw-r--r--  rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/thisimage  1583
-rw-r--r--  rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage  184
-rw-r--r--  rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.1  184
-rw-r--r--  rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.2  184
-rw-r--r--  rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.3  184
-rw-r--r--  rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/postimage  397
-rw-r--r--  rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/preimage  457
-rw-r--r--  rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/postimage  383
-rw-r--r--  rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/preimage  558
-rw-r--r--  rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/thisimage  558
-rw-r--r--  rr-cache/d518adaa86824df51e502dbc536338e404075a83/thisimage  2
-rw-r--r--  rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/postimage  33
-rw-r--r--  rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/preimage  55
-rw-r--r--  rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/thisimage  55
-rw-r--r--  rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/postimage  66
-rw-r--r--  rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/preimage  69
-rw-r--r--  rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/thisimage  69
-rw-r--r--  rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/postimage  646
-rw-r--r--  rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/preimage  1390
-rw-r--r--  rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/thisimage  1390
-rw-r--r--  rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/postimage  1255
-rw-r--r--  rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/preimage  1256
-rw-r--r--  rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/postimage  1303
-rw-r--r--  rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/preimage  1638
-rw-r--r--  rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/thisimage  1638
-rw-r--r--  rr-cache/dfb0515b85f9029dc7d918f636a84498bb3f477b/preimage  1384
-rw-r--r--  rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/postimage  111
-rw-r--r--  rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/preimage  144
-rw-r--r--  rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/thisimage  144
-rw-r--r--  rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/postimage  24
-rw-r--r--  rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/preimage  34
-rw-r--r--  rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/thisimage  34
-rw-r--r--  rr-cache/e21fcfcfd5d7e0b79ef526b7b9fb02629f206140/preimage  451
-rw-r--r--  rr-cache/e49ddc0a8632bffe1c6ec9e189a142814e970dd0/preimage  8153
-rw-r--r--  rr-cache/e4f5bcdfe9ae1a8e2987cd336d16246930272948/preimage  215
-rw-r--r--  rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/postimage  174
-rw-r--r--  rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/preimage  177
-rw-r--r--  rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/thisimage  177
-rw-r--r--  rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/postimage  85
-rw-r--r--  rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/preimage  89
-rw-r--r--  rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/thisimage  89
-rw-r--r--  rr-cache/ece77c075cbd2f88b3ef8bcec6fb0533986e8390/preimage  40
-rw-r--r--  rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/postimage  1495
-rw-r--r--  rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/preimage  2236
-rw-r--r--  rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/thisimage  2236
-rw-r--r--  rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/postimage  128
-rw-r--r--  rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/preimage  166
-rw-r--r--  rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/thisimage  166
-rw-r--r--  rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/postimage  744
-rw-r--r--  rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/preimage  747
-rw-r--r--  rr-cache/f48c6d7c01caa8f8b83cdf196c25aa3780acdc0d/preimage  258
-rw-r--r--  rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/postimage  412
-rw-r--r--  rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/preimage  423
-rw-r--r--  rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/postimage  69
-rw-r--r--  rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/preimage  93
-rw-r--r--  rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/thisimage  93
223 files changed, 149051 insertions, 86 deletions
diff --git a/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage b/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage
new file mode 100644
index 0000000..2d89e6b
--- /dev/null
+++ b/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage
@@ -0,0 +1,8142 @@
+/*
+ * Universal Flash Storage Host controller driver Core
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#include <linux/async.h>
+#include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+#include "unipro.h"
+#include "ufs-sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#define UFSHCD_REQ_SENSE_SIZE 18
+
+#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
+ UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK)
+/* UIC command timeout, unit: ms */
+#define UIC_CMD_TIMEOUT 500
+
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 3
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
+#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
+ ({ \
+ int _ret; \
+ if (_on) \
+ _ret = ufshcd_enable_vreg(_dev, _vreg); \
+ else \
+ _ret = ufshcd_disable_vreg(_dev, _vreg); \
+ _ret; \
+ })
+
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
+};
+
+/* UFSHCD error handling flags */
+enum {
+ UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+ UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
+};
+
+#define ufshcd_set_eh_in_progress(h) \
+ ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+ ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+ ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].link_state;
+}
+
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ enum uic_link_state link_state)
+{
+ enum ufs_pm_level lvl;
+
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+ if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+ (ufs_pm_lvl_states[lvl].link_state == link_state))
+ return lvl;
+ }
+
+ /* if no match found, return the level 0 */
+ return UFS_PM_LVL_0;
+}
+
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+ return tag >= 0 && tag < hba->nutrs;
+}
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->is_irq_enabled) {
+ ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+ hba);
+ if (ret)
+ dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+ __func__, ret);
+ hba->is_irq_enabled = true;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+ if (hba->is_irq_enabled) {
+ free_irq(hba->irq, hba);
+ hba->is_irq_enabled = false;
+ }
+}
+
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+ scsi_block_requests(hba->host);
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+ if (!val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_task_req_desc *descp;
+ struct utp_upiu_task_req *task_req;
+ int off = (int)tag - hba->nutrs;
+
+ descp = &hba->utmrdl_base_addr[off];
+ task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+ &task_req->input_param1);
+}
+
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+ sector_t lba = -1;
+ u8 opcode = 0;
+ u32 intr, doorbell;
+ struct ufshcd_lrb *lrbp;
+ int transfer_len = -1;
+
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
+ if (!trace_ufshcd_command_enabled())
+ return;
+
+ lrbp = &hba->lrb[tag];
+
+ if (lrbp->cmd) { /* data phase exists */
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba =
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+
+ intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ doorbell, transfer_len, intr, lba, opcode);
+}
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+ clki->max_freq)
+ dev_err(hba->dev, "clk: %s, rate: %u\n",
+ clki->name, clki->curr_freq);
+ }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+ struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+ int i;
+
+ for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+ int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+ if (err_hist->reg[p] == 0)
+ continue;
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+ err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ }
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ /*
+ * hex_dump reads its data without the readl macro. This might
+ * cause inconsistency issues on some platform, as the printed
+ * values may be from cache and not the most recent value.
+ * To know whether you are looking at an un-cached version verify
+ * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
+ * during platform/pci probe function.
+ */
+ ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
+ hba->ufs_version, hba->capabilities);
+ dev_err(hba->dev,
+ "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
+ (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+ ufshcd_print_clk_freqs(hba);
+
+ if (hba->vops && hba->vops->dbg_register_dump)
+ hba->vops->dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+ struct ufshcd_lrb *lrbp;
+ int prdt_length;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
+ tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
+ tag, ktime_to_us(lrbp->compl_time_stamp));
+ dev_err(hba->dev,
+ "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
+ tag, (u64)lrbp->utrd_dma_addr);
+
+ ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+ sizeof(struct utp_transfer_req_desc));
+ dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_req_dma_addr);
+ ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_rsp_dma_addr);
+ ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+ sizeof(struct utp_upiu_rsp));
+
+ prdt_length = le16_to_cpu(
+ lrbp->utr_descriptor_ptr->prd_table_length);
+ dev_err(hba->dev,
+ "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
+ tag, prdt_length,
+ (u64)lrbp->ucd_prdt_dma_addr);
+
+ if (pr_prdt)
+ ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+ sizeof(struct ufshcd_sg_entry) * prdt_length);
+ }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct utp_task_req_desc *tmrdp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+ tmrdp = &hba->utmrdl_base_addr[tag];
+ dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
+ ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+ sizeof(struct request_desc_header));
+ dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+ sizeof(struct utp_task_req_desc));
+ }
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+ dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+ dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ hba->saved_err, hba->saved_uic_err);
+ dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+ hba->pm_op_in_progress, hba->is_sys_suspended);
+ dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+ hba->auto_bkops_enabled, hba->host->host_self_blocked);
+ dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+ hba->eh_flags, hba->req_abort_count);
+ dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+ hba->capabilities, hba->caps);
+ dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+ hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ static const char * const names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ __func__,
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ if (can_sleep)
+ usleep_range(interval_us, interval_us + 50);
+ else
+ udelay(interval_us);
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ * @hba: Pointer to adapter instance
+ *
+ * Returns interrupt bit mask per version
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ break;
+ }
+
+ return intr_mask;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
+ return ufshcd_vops_get_ufs_hci_version(hba);
+
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device connected to
+ * the host controller
+ * @hba: pointer to adapter instance
+ *
+ * Returns true if device present, false if no device detected
+ */
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? true : false;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
+ *
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
+ */
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+{
+ int tag;
+ bool ret = false;
+
+ if (!free_slot)
+ goto out;
+
+ do {
+ tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+ if (tag >= hba->nutmrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+ *free_slot = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+ clear_bit_unlock(slot, &hba->tm_slots_in_use);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos),
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument3
+ * Returns 0 on success, non zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers,
+ * When run-stop registers are set to 1, it indicates the
+ * host controller that it can process the requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP);
+ ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns false if controller is active, true otherwise
+ */
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+ ? false : true;
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+ /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ return UFS_UNIPRO_VER_1_41;
+ else
+ return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+ /*
+ * If both host and device support UniPro ver1.6 or later, PA layer
+ * parameters tuning happens during link startup itself.
+ *
+ * We can manually tune PA layer parameters if either host or device
+ * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+ * logic simple, we will only do manual tuning if local unipro version
+ * doesn't support ver1.6 or later.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+ return true;
+ else
+ return false;
+}
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled up", clki->name,
+ clki->curr_freq,
+ clki->max_freq);
+
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled down", clki->name,
+ clki->curr_freq,
+ clki->min_freq);
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+ bool scale_up)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return false;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ return true;
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+ u64 wait_timeout_us)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 tm_doorbell;
+ u32 tr_doorbell;
+ bool timeout = false, do_last_check = false;
+ ktime_t start;
+
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Wait for all the outstanding tasks/transfer requests.
+ * Verify by checking the doorbell registers are clear.
+ */
+ start = ktime_get();
+ do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!tm_doorbell && !tr_doorbell) {
+ timeout = false;
+ break;
+ } else if (do_last_check) {
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+ wait_timeout_us) {
+ timeout = true;
+ /*
+ * We might have scheduled out for long time so make
+ * sure to check if doorbells are cleared by this time
+ * or not.
+ */
+ do_last_check = true;
+ }
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (tm_doorbell || tr_doorbell);
+
+ if (timeout) {
+ dev_err(hba->dev,
+ "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+ __func__, tm_doorbell, tr_doorbell);
+ ret = -EBUSY;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+ #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
+ int ret = 0;
+ struct ufs_pa_layer_attr new_pwr_info;
+
+ if (scale_up) {
+ memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+ sizeof(struct ufs_pa_layer_attr));
+ } else {
+ memcpy(&new_pwr_info, &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
+ || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ /* save the current power mode */
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ /* scale down gear */
+ new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ }
+ }
+
+ /* check if the power mode needs to be changed or not? */
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+ if (ret)
+ dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+ __func__, ret,
+ hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+ new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+ return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+ #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+ int ret = 0;
+ /*
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->clk_scaling_lock);
+ if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+ }
+
+ return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+ return ret;
+
+ /* scale down the gear before scaling down clocks */
+ if (!scale_up) {
+ ret = ufshcd_scale_gear(hba, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_scale_clks(hba, scale_up);
+ if (ret) {
+ if (!scale_up)
+ ufshcd_scale_gear(hba, true);
+ goto out;
+ }
+
+ /* scale up the gear after scaling up clocks */
+ if (scale_up) {
+ ret = ufshcd_scale_gear(hba, true);
+ if (ret) {
+ ufshcd_scale_clks(hba, false);
+ goto out;
+ }
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ ufshcd_clock_scaling_unprepare(hba);
+ ufshcd_release(hba);
+ return ret;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.suspend_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = true;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.resume_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (!hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ devfreq_resume_device(hba->devfreq);
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ ktime_t start;
+ bool scale_up, sched_clk_scaling_suspend_work = false;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ unsigned long irq_flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
+
+ if (list_empty(clk_list)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ goto out;
+ }
+
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ scale_up = (*freq == clki->max_freq) ? true : false;
+ if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ ret = 0;
+ goto out; /* no state change required */
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ start = ktime_get();
+ ret = ufshcd_devfreq_scale(hba, scale_up);
+
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+ if (sched_clk_scaling_suspend_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.suspend_work);
+
+ return ret;
+}
+
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
+static int ufshcd_devfreq_init(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct devfreq *devfreq;
+ int ret;
+
+ /* Skip devfreq if we don't have any clocks in the list */
+ if (list_empty(clk_list))
+ return 0;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+
+ devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+<<<<<<<
+ "simple_ondemand",
+=======
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+>>>>>>>
+ NULL);
+ if (IS_ERR(devfreq)) {
+ ret = PTR_ERR(devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
+
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ return ret;
+ }
+
+ hba->devfreq = devfreq;
+
+ return 0;
+}
+
+static void ufshcd_devfreq_remove(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ if (!hba->devfreq)
+ return;
+
+ devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+}
+
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ devfreq_suspend_device(hba->devfreq);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.window_start_t = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool suspend = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (suspend)
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool resume = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (resume)
+ devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_scaling.is_allowed)
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+
+ hba->clk_scaling.is_allowed = value;
+
+ if (value) {
+ ufshcd_resume_clkscaling(hba);
+ } else {
+ ufshcd_suspend_clkscaling(hba);
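+		/* With scaling disabled, park the clocks at their maximum frequency */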
+ err = ufshcd_devfreq_scale(hba, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ }
+
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return count;
+}
+
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+ hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+ hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+ sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+ hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+ hba->clk_scaling.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
+
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.ungate_work);
+
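+	/* Cancel any pending gate work so it cannot race with ungating */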
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == CLKS_ON) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_setup_clocks(hba, true);
+
+ /* Exit from hibern8 */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ /* Prevent gating in this path */
+ hba->clk_gating.is_suspended = true;
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ else
+ ufshcd_set_link_active(hba);
+ }
+ hba->clk_gating.is_suspended = false;
+ }
+unblock_reqs:
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
+start:
+ switch (hba->clk_gating.state) {
+ case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+		 * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we exit hibern8 state also in addition to
+ * clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
+ break;
+ case REQ_CLKS_OFF:
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ break;
+ }
+ /*
+ * If we are here, it means gating work is either done or
+ * currently running. Hence, fall through to cancel gating
+ * work and to enable clocks.
+ */
+ case CLKS_OFF:
+ ufshcd_scsi_block_requests(hba);
+ hba->clk_gating.state = REQ_CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_CLKS_ON:
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ /* Make sure state is CLKS_ON before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ __func__, hba->clk_gating.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.gate_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+	 * If this work is being cancelled, the gating state will already
+	 * be marked as REQ_CLKS_ON. In that case save time by skipping
+	 * the gating work and exit after changing the clock state to
+	 * CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto rel_lock;
+ }
+
+ if (hba->clk_gating.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* put the link into hibern8 mode before turning off clocks */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ if (ufshcd_uic_hibern8_enter(hba)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ /*
+	 * If an ungate request arrived in the meantime, the gating state
+	 * will already be marked as REQ_CLKS_ON. Keep the state as
+	 * REQ_CLKS_ON, which anyway implies that the clocks are off and a
+	 * request to turn them on is pending. Keeping the state machine
+	 * intact this way ultimately prevents the cancel work from being
+	 * run multiple times when new requests arrive before the current
+	 * cancel work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.active_reqs--;
+
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba))
+ return;
+
+ hba->clk_gating.state = REQ_CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ schedule_delayed_work(&hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_gating.is_enabled)
+ goto out;
+
+ if (value) {
+ ufshcd_release(hba);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->clk_gating.is_enabled = value;
+out:
+ return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.delay_ms = 150;
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
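+	/*
+	 * Gate/ungate work must be serialized and runs in the I/O path,
+	 * so use an ordered workqueue with WQ_MEM_RECLAIM.
+	 */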
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
+
+ hba->clk_gating.is_enabled = true;
+
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ cancel_work_sync(&hba->clk_gating.ungate_work);
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+ bool queue_resume_work = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
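+	/* The first outstanding request schedules the resume work below */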
+ if (!hba->clk_scaling.active_reqs++)
+ queue_resume_work = true;
+
+ if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ return;
+
+ if (queue_resume_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.resume_work);
+
+ if (!hba->clk_scaling.window_start_t) {
+ hba->clk_scaling.window_start_t = jiffies;
+ hba->clk_scaling.tot_busy_t = 0;
+ hba->clk_scaling.is_busy_started = false;
+ }
+
+ if (!hba->clk_scaling.is_busy_started) {
+ hba->clk_scaling.busy_start_t = ktime_get();
+ hba->clk_scaling.is_busy_started = true;
+ }
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_clk_scaling_start_busy(hba);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+ ufshcd_add_command_trace(hba, task_tag, "send");
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
+ }
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static
+int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 resp_len;
+ u16 buf_len;
+
+ /* data segment length */
+ resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+ buf_len = be16_to_cpu(
+ hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_ready_for_uic_cmd - Check if controller is ready
+ * to accept UIC commands
+ * @hba: per adapter instance
+ * Return true on success, else false
+ */
+static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+{
+ if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of the HCS register.
+ * Returns the value of the UPMCRS field.
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Mutex must be held.
+ */
+static inline void
+ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ WARN_ON(hba->active_uic_cmd);
+
+ hba->active_uic_cmd = uic_cmd;
+
+ /* Write Args */
+ ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
+ ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
+ ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
+
+ /* Write UIC Cmd */
+ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
+ REG_UIC_COMMAND);
+}
+
+/**
+ * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Must be called with mutex held.
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ if (wait_for_completion_timeout(&uic_cmd->done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ else
+ ret = -ETIMEDOUT;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+/**
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
+ *
+ * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
+ * mutex itself. Must be called with the mutex held and host_lock locked.
+ * Returns 0 only if success.
+ */
+static int
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
+{
+ if (!ufshcd_ready_for_uic_cmd(hba)) {
+ dev_err(hba->dev,
+ "Controller not ready to accept UIC commands\n");
+ return -EIO;
+ }
+
+ if (completion)
+ init_completion(&uic_cmd->done);
+
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+
+ return 0;
+}
+
+/**
+ * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
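+		/*
+		 * Some controllers expect the PRDT length in bytes rather
+		 * than in number of entries (UFSHCD_QUIRK_PRDT_BYTE_GRAN).
+		 */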
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = set & INTERRUPT_MASK_RW_VER_10;
+ set = rw | ((set ^ intrs) & intrs);
+ } else {
+ set |= intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = (set & INTERRUPT_MASK_RW_VER_10) &
+ ~(intrs & INTERRUPT_MASK_RW_VER_10);
+ set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
+
+ } else {
+ set &= ~intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fill the request's header
+ * descriptor according to the request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: the request's data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
+ /*
+	 * Assign an invalid value to the command status; the controller
+	 * updates OCS with the actual status on command completion.
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
+ * for scsi commands
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+	/* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+
+ /* Copy the Descriptor */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(descp, query->descriptor, len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_comp_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
+ * for Device Management Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
+ * for SCSI Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+ return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
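+	/*
+	 * If the scaling lock cannot be taken for read, clock scaling is
+	 * in progress; back off and let the SCSI midlayer retry instead
+	 * of blocking in the issue path.
+	 */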
+ if (!down_read_trylock(&hba->clk_scaling_lock))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED:
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+
+ /* if error handling is in progress, don't issue commands */
+ if (ufshcd_eh_in_progress(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ hba->req_abort_count = 0;
+
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * Dev manage command in progress, requeue the command.
+		 * Requeuing the command helps in cases where the request *may*
+		 * find a different tag instead of waiting for the device
+		 * management command to complete.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ err = ufshcd_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+ lrbp = &hba->lrb[tag];
+
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+
+ err = ufshcd_map_sg(hba, lrbp);
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_comp_devman_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+	 * Wait for the h/w to clear the corresponding bit in the doorbell.
+	 * Max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000, true);
+
+ return err;
+}
+
+static int
+ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ err = ufshcd_check_query_response(hba, lrbp);
+ if (!err)
+ err = ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* successfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ /*
+ * in case of an error, after clearing the doorbell,
+ * we also need to clear the outstanding_request
+ * field in hba
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if a free slot is unavailable for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
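+	/* Claim the highest free slot; retry if another context takes it first */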
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba: UFS hba
+ * @cmd_type: specifies the type (NOP, Query...)
+ * @timeout: timeout in milliseconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, tag,
+ err ? "query_complete_err" : "query_complete");
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request, struct ufs_query_res **response,
+ enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ *attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+		"%s: query attribute, idn %d, failed with error %d after %d retries\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received in the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_HEALTH:
+ *desc_len = hba->desc_size.hlth_desc;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 param_offset,
+ u8 *param_read_buf,
+ u8 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ int buff_len;
+ bool is_kmalloc = true;
+
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+	/*
+	 * Get the max length of the descriptor from the structure filled
+	 * up at probe time.
+	 */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ }
+
+ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
+ goto out;
+ }
+
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* Make sure we do not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba,
+ QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ char *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+		 * The descriptor contains a string in UTF-16 format;
+		 * convert it to UTF-8 so it can be displayed.
+ */
+ utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+ desc_len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ int lun,
+ enum unit_desc_param param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ /*
+ * Unit descriptors are only available for general purpose LUs (LUN id
+ * from 0 to 7) and RPMB Well known LU.
+ */
+ if (!ufs_is_valid_unit_desc_lun(lun))
+ return -EOPNOTSUPP;
+
+ return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+ param_offset, param_read_buf, param_size);
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ *	Each command descriptor consists of a Command UPIU, a Response UPIU and a PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+ /*
+ * UFSHCI requires UTP command descriptor to be 128 byte aligned.
+	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it
+	 * is, then it will be aligned to 128 bytes as well.
+ */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = devm_kcalloc(hba->dev,
+ hba->nutrs, sizeof(struct ufshcd_lrb),
+ GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+ /* Response upiu and prdt offset should be in double words */
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+ (i * sizeof(struct utp_transfer_req_desc));
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+ response_offset;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+ prdt_offset;
+ }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev,
+ "dme-link-startup: error code %d\n", ret);
+ return ret;
+}
+
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset UniPro stack.
+ * This function currently deals with cold reset only.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for 1st call to
+ * this function
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+ struct ufs_pa_layer_attr orig_pwr_info;
+ struct ufs_pa_layer_attr temp_pwr_info;
+ bool pwr_mode_change = false;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+ orig_pwr_info = hba->pwr_info;
+ temp_pwr_info = orig_pwr_info;
+
+ if (orig_pwr_info.pwr_tx == FAST_MODE ||
+ orig_pwr_info.pwr_rx == FAST_MODE) {
+ temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+ temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+ pwr_mode_change = true;
+ } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+ orig_pwr_info.pwr_rx == SLOW_MODE) {
+ temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+ temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+ pwr_mode_change = true;
+ }
+ if (pwr_mode_change) {
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ if (ret)
+ goto out;
+ }
+ }
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
+ *mib_val = uic_cmd.argument3;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+ && pwr_mode_change)
+ ufshcd_change_power_mode(hba, &orig_pwr_info);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
+ * state) and waits for them to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT take some time to take effect on both the host and
+ * device UniPro links, hence their final completion is indicated by
+ * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
+ * in addition to the normal UIC command completion status (UCCS). This
+ * function only returns after the relevant status bits indicate completion.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ struct completion uic_async_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+ bool reenable_intr = false;
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->uic_async_done = &uic_async_done;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
+ }
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->uic_async_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
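+ /* read UPMCRS (UIC power mode change request status) from the controller */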
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ if (ret) {
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+out:
+ return ret;
+}
+
+static int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
+ struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+
+ /*
+ * If link recovery fails then return an error so that the caller
+ * doesn't retry the hibern8 enter again.
+ */
+ if (ufshcd_link_recovery(hba))
+ ret = -ENOLINK;
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
+
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
+
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret || ret == -ENOLINK)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
+ } else {
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.hibern8_exit_cnt++;
+ }
+
+ return ret;
+}
+
+static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
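+ /* program the Auto-Hibernate Idle Timer register with the configured value */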
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+ hba->pwr_info.gear_rx = UFS_PWM_G1;
+ hba->pwr_info.gear_tx = UFS_PWM_G1;
+ hba->pwr_info.lane_rx = 1;
+ hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+ hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+ hba->pwr_info.hs_rate = 0;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
+ /*
+ * First, get the maximum HS gear. A zero value means there is no
+ * HS gear capability, in which case fall back to the maximum PWM gear.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOW_MODE;
+ }
+
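+ /* the peer's (device's) max RX gear is used as the limit for our TX gear */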
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOW_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
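+ /* mode packs RX power mode in bits [7:4] and TX power mode in bits [3:0] */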
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: power mode change failed %d\n", __func__, ret);
+ } else {
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ pwr_mode);
+
+ memcpy(&hba->pwr_info, pwr_mode,
+ sizeof(struct ufs_pa_layer_attr));
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ struct ufs_pa_layer_attr final_params = { 0 };
+ int ret;
+
+ ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
+ desired_pwr_mode, &final_params);
+
+ if (ret)
+ memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+ ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_print_pwr_info(hba);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set fDeviceInit flag and poll until device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i;
+ int err;
+ bool flag_res = 1;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* Enable required interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+ /* Configure interrupt aggregation */
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+ else
+ ufshcd_disable_intr_aggr(hba);
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ /*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
+ * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+ */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(hba->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10, 1, can_sleep);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
+ * ufshcd_hba_execute_hce - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and the controller firmware initialization
+ * sequence kicks off. When the controller is ready it sets
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+{
+ int retry;
+
+ /*
+ * msleep of 1 and 5 used in this function might result in msleep(20),
+ * but it was necessary to send the UFS FPGA to reset mode during
+ * development and testing of this driver. msleep can be changed to
+ * mdelay and retry count can be reduced based on the controller.
+ */
+ if (!ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba, true);
+
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
+
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So, without this delay, the value HCE = 1 set in the previous
+ * instruction might be read back too early.
+ * This delay can be changed based on the controller.
+ */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(hba->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+
+ return 0;
+}
+
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
+ ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+ ret = ufshcd_dme_reset(hba);
+ if (!ret) {
+ ret = ufshcd_dme_enable(hba);
+ if (!ret)
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+ if (ret)
+ dev_err(hba->dev,
+ "Host controller enable failed with non-hce\n");
+ }
+ } else {
+ ret = ufshcd_hba_execute_hce(hba);
+ }
+
+ return ret;
+}
+
+static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
+{
+ int tx_lanes, i, err = 0;
+
+ if (!peer)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ else
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ for (i = 0; i < tx_lanes; i++) {
+ if (!peer)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ else
+ err = ufshcd_dme_peer_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ if (err) {
+ dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
+ __func__, peer, i, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, true);
+}
+
+/**
+ * ufshcd_link_startup - Initialize unipro link startup
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_link_startup(struct ufs_hba *hba)
+{
+ int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If UFS device isn't active then we will have to issue link startup
+ * 2 times to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+
+link_startup:
+ do {
+ ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
+
+ if (ret)
+ /* failed to get the link up... give up */
+ goto out;
+
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+ ufshcd_print_pwr_info(hba);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_device_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
+ /* Include any host controller configuration via UIC commands */
+ ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_make_hba_operational(hba);
+out:
+ if (ret) {
+ dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+ return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
+ * value that the host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+ int ret = 0;
+ u8 lun_qdepth;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
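+ /* start from the host queue depth; refined below using bLUQueueDepth */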
+ lun_qdepth = hba->nutrs;
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+
+ /* Some WLUNs don't support the unit descriptor */
+ if (ret == -EOPNOTSUPP)
+ lun_qdepth = 1;
+ else if (!lun_qdepth)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_change_queue_depth(sdev, lun_qdepth);
+}
+
+/**
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success and the write protect status is returned in
+ * the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+ u8 lun,
+ u8 *b_lu_write_protect)
+{
+ int ret;
+
+ if (!b_lu_write_protect)
+ ret = -EINVAL;
+ /*
+ * According to UFS device spec, RPMB LU can't be write
+ * protected so skip reading bLUWriteProtect parameter for
+ * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+ */
+ else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ ret = -ENOTSUPP;
+ else
+ ret = ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_LU_WR_PROTECT,
+ b_lu_write_protect,
+ sizeof(*b_lu_write_protect));
+ return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ if (hba->dev_info.f_power_on_wp_en &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ u8 b_lu_write_protect;
+
+ if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+ &b_lu_write_protect) &&
+ (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+ hba->dev_info.is_lu_power_on_wp = true;
+ }
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+
+ /* allow SCSI layer to restart the device in case of errors */
+ sdev->allow_restart = 1;
+
+ /* REPORT SUPPORTED OPERATION CODES is not supported */
+ sdev->no_report_opcodes = 1;
+
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
+
+ ufshcd_set_queue_depth(sdev);
+
+ ufshcd_get_lu_power_on_wp_status(hba, sdev);
+
+ return 0;
+}
+
+/**
+ * ufshcd_change_queue_depth - change queue depth
+ * @sdev: pointer to SCSI device
+ * @depth: required depth to set
+ *
+ * Change queue depth and make sure the max. limits are not crossed.
+ */
+static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ if (depth > hba->nutrs)
+ depth = hba->nutrs;
+ return scsi_change_queue_depth(sdev, depth);
+}
+
+/**
+ * ufshcd_slave_configure - adjust SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static int ufshcd_slave_configure(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+
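+ /* align DMA padding and max segment size with the PRDT byte count limits */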
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ /* Drop the reference as it won't be needed anymore */
+ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->sdev_ufs_device = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ * @resp: task management service response
+ *
+ * Returns non-zero value on error, zero on success
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->output_param1);
+ task_result = task_result & MASK_TM_SERVICE_RESP;
+ if (resp)
+ *resp = (u8)task_result;
+ } else {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ocs_value;
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrbp: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
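+ /* fall through - a check condition is still completed with DID_OK */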
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ scsi_status;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_ABORTED:
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ /*
+ * Currently we are only supporting BKOPs exception
+ * events hence we can ignore BKOPs exception event
+ * during power management callbacks. BKOPs exception
+ * event is not expected to be raised in runtime suspend
+ * callback as it allows the urgent bkops.
+ * During system suspend, we are anyway forcefully
+ * disabling the bkops and if urgent bkops is needed
+ * it will be enabled on system resume. Long term
+ * solution could be to abort the system suspend if
+ * UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+ dev_err(hba->dev,
+ "Unexpected request response code = %x\n",
+ result);
+ result = DID_ERROR << 16;
+ break;
+ }
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_COMMAND_STATUS:
+ result |= DID_REQUEUE << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(hba->dev,
+ "OCS error from controller = %x for tag %d\n",
+ ocs, lrbp->task_tag);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ break;
+ } /* end of switch */
+
+ if (host_byte(result) != DID_OK)
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
+ return result;
+}
+
+/**
+ * ufshcd_uic_cmd_compl - handle completion of uic command
+ * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
+ */
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+{
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
+ hba->active_uic_cmd->argument2 |=
+ ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
+ complete(&hba->active_uic_cmd->done);
+ }
+
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ complete(hba->uic_async_done);
+}
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ int result;
+ int index;
+
+ for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ ufshcd_add_command_trace(hba, index, "complete");
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ __ufshcd_release(hba);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ ufshcd_add_command_trace(hba, index,
+ "dev_complete");
+ complete(hba->dev_cmd.complete);
+ }
+ }
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
+
+ lrbp->compl_time_stamp = ktime_get();
+ }
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ ufshcd_clk_scaling_update_busy(hba);
+
+ /* we might have freed some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent starvation of other interrupts, the DB is read once
+ * after reset. The downside of this solution is the possibility of a
+ * false interrupt if the device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ if (ufshcd_is_intr_aggr_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
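+ /* bits set in outstanding_reqs but cleared in the doorbell are completed */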
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+
+ /* No need of URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback of the device moving into a critical state where it is
+ * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
+ * @hba: per adapter instance
+ *
+ * After a device reset, the device may toggle the BKOPS_EN flag back to
+ * its default value, so the s/w tracking variables should be updated as
+ * well. This function changes the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+ } else {
+ hba->auto_bkops_enabled = true;
+ hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+ ufshcd_disable_auto_bkops(hba);
+ }
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to the "status" argument passed to
+ * this function; disable it otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+ enum bkops_status status)
+{
+ int err;
+ u32 curr_status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ } else if (curr_status > BKOPS_STATUS_MAX) {
+ dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+ __func__, curr_status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (curr_status >= status)
+ err = ufshcd_enable_auto_bkops(hba);
+ else
+ err = ufshcd_disable_auto_bkops(hba);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * Returns 0 if BKOPS is enabled, 1 if BKOPS is not enabled,
+ * and a negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+ int err;
+ u32 curr_status = 0;
+
+ if (hba->is_urgent_bkops_lvl_checked)
+ goto enable_auto_bkops;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * We are seeing that some devices raise the urgent bkops exception
+ * event even when the BKOPS status doesn't indicate that performance is
+ * impacted or critical. Handle these devices by determining their urgent
+ * bkops status at runtime.
+ */
+ if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+ dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+ __func__, curr_status);
+ /* update the current status as the urgent bkops level */
+ hba->urgent_bkops_lvl = curr_status;
+ hba->is_urgent_bkops_lvl_checked = true;
+ }
+
+enable_auto_bkops:
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+
+ if (status & MASK_EE_URGENT_BKOPS)
+ ufshcd_bkops_exception_event_handler(hba);
+
+out:
+ scsi_unblock_requests(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ return;
+}
+
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
+ * is required to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool err_handling = true;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
+ * device fatal errors and/or DL NAC & REPLAY timeout errors.
+ */
+ if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+ goto out;
+
+ if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+ goto out;
+
+ if ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+ int err;
+ /*
+ * wait for 50ms to see if we can get any other errors or not.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ msleep(50);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /*
+ * now check if we have got any other severe errors other than
+ * the DL NAC error.
+ */
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+ goto out;
+
+ /*
+ * As DL NAC is the only error received so far, send out NOP
+ * command to confirm if link is still active or not.
+ * - If we don't get any response then do error recovery.
+ * - If we get response then clear the DL NAC error bit.
+ */
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_verify_dev_init(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (err)
+ goto out;
+
+ /* Link seems to be alive hence ignore the DL NAC errors */
+ if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+ hba->saved_err &= ~UIC_ERROR;
+ /* clear NAC error */
+ hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ if (!hba->saved_uic_err) {
+ err_handling = false;
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err_handling;
+}
+
+/**
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
+ */
+static void ufshcd_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ unsigned long flags;
+ u32 err_xfer = 0;
+ u32 err_tm = 0;
+ int err = 0;
+ int tag;
+ bool needs_reset = false;
+
+ hba = container_of(work, struct ufs_hba, eh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ goto out;
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ bool ret;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+ ret = ufshcd_quirk_dl_nac_errors(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!ret)
+ goto skip_err_handling;
+ }
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ needs_reset = true;
+
+ /*
+ * if host reset is required then skip clearing the pending
+ * transfers forcefully because they will automatically get
+ * cleared after link startup.
+ */
+ if (needs_reset)
+ goto skip_pending_xfer_clear;
+
+ /* release lock as clear command might sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, tag)) {
+ err_xfer = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+ /* Clear pending task management requests */
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+ if (ufshcd_clear_tm_cmd(hba, tag)) {
+ err_tm = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+lock_skip_pending_xfer_clear:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
+
+ if (err_xfer || err_tm)
+ needs_reset = true;
+
+skip_pending_xfer_clear:
+ /* Fatal errors need reset */
+ if (needs_reset) {
+ unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+ /*
+ * ufshcd_reset_and_restore() does the link reinitialization
+ * which will need at least one empty doorbell slot to send the
+ * device management commands (NOP and query commands).
+ * If there is no empty slot at this moment then free up the last
+ * slot forcefully.
+ */
+ if (hba->outstanding_reqs == max_doorbells)
+ __ufshcd_transfer_req_compl(hba,
+ (1UL << (hba->nutrs - 1)));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_reset_and_restore(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (err) {
+ dev_err(hba->dev, "%s: reset and restore failed\n",
+ __func__);
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ /*
+ * Inform the scsi mid-layer that we did reset so that it can handle
+ * Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ }
+
+skip_err_handling:
+ if (!needs_reset) {
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->saved_err || hba->saved_uic_err)
+ dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ }
+
+ ufshcd_clear_eh_in_progress(hba);
+
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+ u32 reg)
+{
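+ /* record the error value and a timestamp in a circular history buffer */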
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ */
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+ }
+
+ /* PA_INIT_ERROR is fatal and needs UIC reset */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg)
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+
+ /* UIC NL/TL/DME errors need software retry */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
+ hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ }
+
+ dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+ __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+ bool queue_eh_work = false;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ queue_eh_work = true;
+
+ if (hba->errors & UIC_ERROR) {
+ hba->uic_error = 0;
+ ufshcd_update_uic_error(hba);
+ if (hba->uic_error)
+ queue_eh_work = true;
+ }
+
+ if (queue_eh_work) {
+ /*
+ * update the transfer error masks to sticky bits, let's do this
+ * irrespective of current ufshcd_state.
+ */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
+ /* handle fatal errors only when link is functional */
+ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+ /* block commands from scsi mid-layer */
+ ufshcd_scsi_block_requests(hba);
+
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+
+ /* dump controller state before resetting */
+ if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ bool pr_prdt = !!(hba->saved_err &
+ SYSTEM_BUS_FATAL_ERROR);
+
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ __func__, hba->saved_err,
+ hba->saved_uic_err);
+
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs,
+ pr_prdt);
+ }
+ schedule_work(&hba->eh_work);
+ }
+ }
+ /*
+ * if (!queue_eh_work) -
+ * Other errors are either non-fatal where host recovers
+ * itself without s/w intervention or errors that will be
+ * handled by the SCSI core layer.
+ */
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
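+ /*
+ * A TM slot whose doorbell bit has cleared since it was issued has
+ * completed; XOR with outstanding_tasks picks out exactly those slots.
+ */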
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up(&hba->tm_wq);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_check_errors(hba);
+
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status, enabled_intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
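+ /* service only interrupt sources that are both asserted and enabled */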
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
+
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utmrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for max. 1 sec to clear door bell register by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000, true);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot;
+ int err;
+ int task_tag;
+
+ host = hba->host;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ */
+ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
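+ /*
+ * TM request tags are numbered past the transfer request tag space
+ * (0..nutrs-1) so they never collide with SCSI command tags.
+ */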
+ task_tag = hba->nutrs + free_slot;
+ task_req_upiup->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lun_id, task_tag);
+ task_req_upiup->header.dword_1 =
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+ task_req_upiup->input_param2 = cpu_to_be32(task_id);
+
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
+ ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
+ /* wait until the task management command is completed */
+ err = wait_event_timeout(hba->tm_wq,
+ test_bit(free_slot, &hba->tm_condition),
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
+ if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, free_slot))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, free_slot);
+ err = -ETIMEDOUT;
+ } else {
+ err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ }
+
+ clear_bit(free_slot, &hba->tm_condition);
+ ufshcd_put_tm_slot(hba, free_slot);
+ wake_up(&hba->tm_tag_wq);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ lrbp = &hba->lrb[tag];
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
+ goto out;
+ }
+
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[pos].lun == lrbp->lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+ }
+ }
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+out:
+ hba->req_abort_count = 0;
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+ return err;
+}
+
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+ lrbp->req_abort_skip = true;
+ }
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Abort the pending command in the device by sending the UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race between the controller sending the command to
+ * the device and the abort being issued. To avoid that, first issue
+ * UFS_QUERY_TASK to check if the command was really issued and only then try
+ * to abort it.
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ u32 reg;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ /*
+ * Task abort to the device W-LUN is illegal. When this command
+ * fails, due to the spec violation, the next SCSI error handling step
+ * will be to send a LU reset which, again, is a spec violation.
+ * To avoid these unnecessary/illegal steps we skip to the last error
+ * handling stage: reset and restore.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ return ufshcd_eh_host_reset_handler(cmd);
+
+ ufshcd_hold(hba, false);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* If command is already aborted/completed, return SUCCESS */
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
+ goto out;
+ }
+
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ }
+
+ /* Print Transfer Request of aborted task */
+ dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+
+ /*
+ * Print detailed info about aborted request.
+ * As more than one request might get aborted at the same time,
+ * print full information only for the first aborted request in order
+ * to reduce repeated printouts. For other aborted requests only print
+ * basic details.
+ */
+ scsi_print_command(hba->lrb[tag].cmd);
+ if (!hba->req_abort_count) {
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_trs(hba, 1 << tag, true);
+ } else {
+ ufshcd_print_trs(hba, 1 << tag, false);
+ }
+ hba->req_abort_count++;
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+ goto out;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err) {
+ err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
+ goto out;
+ }
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_outstanding_req_clear(hba, tag);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
+
+out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ err = FAILED;
+ }
+
+ /*
+ * This ufshcd_release() corresponds to the original scsi cmd that got
+ * aborted here (as we won't get any IRQ for it).
+ */
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to
+ * local and remote (device) Uni-Pro stack and the attributes
+ * are reset to default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+ int err;
+ unsigned long flags;
+
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_hba_stop(hba, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* scale up clocks to max frequency before full reinitialization */
+ ufshcd_scale_clks(hba, true);
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ goto out;
+
+ /* Establish the link again and restore the device */
+ err = ufshcd_probe_hba(hba);
+
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+ err = -EIO;
+out:
+ if (err)
+ dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+ return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+ int err = 0;
+ unsigned long flags;
+ int retries = MAX_HOST_RESET_RETRIES;
+
+ do {
+ err = ufshcd_host_reset_and_restore(hba);
+ } while (err && --retries);
+
+ /*
+ * After reset the door-bell might be cleared, complete
+ * outstanding requests in s/w here.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int err;
+ unsigned long flags;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ ufshcd_hold(hba, false);
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, don't assume
+ * anything out of it. We are just avoiding race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET ||
+ hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!err) {
+ err = SUCCESS;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ } else {
+ err = FAILED;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row at the desc table to start scan from
+ * @buff: power descriptor buffer
+ *
+ * Returns calculated max ICC level for specific regulator
+ */
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+ int i;
+ int curr_uA;
+ u16 data;
+ u16 unit;
+
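+ /*
+ * Scan the active ICC level entries from @start_scan downwards. Each
+ * 16-bit entry encodes a unit (per the ATTR_ICC_LVL_* masks) and a
+ * value; the value is normalised to uA below and the first level whose
+ * current fits under the regulator limit is returned.
+ */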
+ for (i = start_scan; i >= 0; i--) {
+ data = be16_to_cpup((__be16 *)&buff[2 * i]);
+ unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+ ATTR_ICC_LVL_UNIT_OFFSET;
+ curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+ switch (unit) {
+ case UFSHCD_NANO_AMP:
+ curr_uA = curr_uA / 1000;
+ break;
+ case UFSHCD_MILI_AMP:
+ curr_uA = curr_uA * 1000;
+ break;
+ case UFSHCD_AMP:
+ curr_uA = curr_uA * 1000 * 1000;
+ break;
+ case UFSHCD_MICRO_AMP:
+ default:
+ break;
+ }
+ if (sup_curr_uA >= curr_uA)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+ }
+
+ return (u32)i;
+}
+
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
+ * ICC level
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from
+ * @len: length of desc_buf
+ *
+ * Returns the calculated ICC level; returns 0 if the regulators are not
+ * initialized.
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ u8 *desc_buf, int len)
+{
+ u32 icc_level = 0;
+
+ if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+ !hba->vreg_info.vccq2) {
+ dev_err(hba->dev,
+ "%s: Regulator capability was not set, actvIccLevel=%d",
+ __func__, icc_level);
+ goto out;
+ }
+
+ if (hba->vreg_info.vcc)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+ if (hba->vreg_info.vccq)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+ if (hba->vreg_info.vccq2)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+ return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+{
+ int ret;
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 *desc_buf;
+
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return;
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: Failed reading power descriptor.len = %d ret = %d",
+ __func__, buff_len, ret);
+ goto out;
+ }
+
+ hba->init_prefetch_data.icc_level =
+ ufshcd_find_max_sup_active_icc_level(hba,
+ desc_buf, buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+ __func__, hba->init_prefetch_data.icc_level);
+
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+ __func__, hba->init_prefetch_data.icc_level, ret);
+
+out:
+ kfree(desc_buf);
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ * "REPORT_LUNS" (address: 01h)
+ * "UFS Device" (address: 50h)
+ * "RPMB" (address: 44h)
+ * "BOOT" (address: 30h)
+ * The UFS device's power management needs to be controlled by the "POWER
+ * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
+ * condition" field will take effect only when it is sent to the "UFS device"
+ * well known logical unit, hence we require a scsi_device instance to
+ * represent this logical unit in order for the UFS host driver to send the
+ * SSU command for power management.
+ *
+ * We also require a scsi_device instance for the "RPMB" (Replay Protected
+ * Memory Block) LU so that a user space process can control this LU. User
+ * space may also want to have access to the BOOT LU.
+ *
+ * This function adds scsi device instances for each of these well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct scsi_device *sdev_rpmb;
+ struct scsi_device *sdev_boot;
+
+ hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+ if (IS_ERR(hba->sdev_ufs_device)) {
+ ret = PTR_ERR(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
+ goto out;
+ }
+ scsi_device_put(hba->sdev_ufs_device);
+
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
+ goto remove_sdev_ufs_device;
+ }
+ scsi_device_put(sdev_rpmb);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot))
+ dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
+ else
+ scsi_device_put(sdev_boot);
+ goto out;
+
+remove_sdev_ufs_device:
+ scsi_remove_device(hba->sdev_ufs_device);
+out:
+ return ret;
+}
+
+static int ufs_get_device_desc(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ int err;
+ size_t buff_len;
+ u8 model_index;
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
+ QUERY_DESC_MAX_SIZE, true/*ASCII*/);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ struct ufs_dev_fix *f;
+
+ for (f = ufs_fixups; f->quirk; f++) {
+ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
+ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
+ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+
+/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_min_activatetime);
+ if (ret)
+ goto out;
+
+ /* make sure proper unit conversion is applied */
+ tuned_pa_tactivate =
+ ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+ / PA_TACTIVATE_TIME_UNIT_US);
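+ /*
+ * Worked example, assuming the usual 100us RX_MIN_ACTIVATETIME unit
+ * and 10us PA_TACTIVATE unit: a peer minimum of 2 becomes
+ * 2 * 100 / 10 = 20 PA_TACTIVATE units.
+ */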
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ tuned_pa_tactivate);
+
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+ u32 max_hibern8_time, tuned_pa_hibern8time;
+
+ ret = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &local_tx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ max_hibern8_time = max(local_tx_hibern8_time_cap,
+ peer_rx_hibern8_time_cap);
+ /* make sure proper unit conversion is applied */
+ tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+ / PA_HIBERN8_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ tuned_pa_hibern8time);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
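+ /* PA_GRANULARITY values 1..6 map to time steps of 1/4/8/16/32/100 us */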
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
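+ /* round up so the peer's PA_TACTIVATE never ends up shorter than ours */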
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+ if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ ufshcd_tune_pa_tactivate(hba);
+ ufshcd_tune_pa_hibern8time(hba);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+ /* set 1ms timeout for PA_TACTIVATE */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+ int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+ hba->ufs_stats.hibern8_exit_cnt = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+ memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+ hba->req_abort_count = 0;
+}
+
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+ &hba->desc_size.hlth_desc);
+ if (err)
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
+{
+ struct ufs_dev_desc card = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ret = ufshcd_link_startup(hba);
+ if (ret)
+ goto out;
+
+ /* set the default level for urgent bkops */
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+ hba->is_urgent_bkops_lvl_checked = false;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba, &card);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
+ (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+ if (ret)
+ goto out;
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+ hba->wlun_dev_clr_ua = true;
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+ */
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ bool flag;
+
+ /* clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ if (!hba->is_init_prefetch)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ if (ufshcd_scsi_add_wlus(hba))
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ }
+
+ if (!hba->is_init_prefetch)
+ hba->is_init_prefetch = true;
+
+out:
+ /*
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
+ */
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_hba_exit(hba);
+ }
+
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)data;
+
+ ufshcd_probe_hba(hba);
+}
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int index;
+ bool found = false;
+
+ if (!scmd || !scmd->device || !scmd->device->host)
+ return BLK_EH_DONE;
+
+ host = scmd->device->host;
+ hba = shost_priv(host);
+ if (!hba)
+ return BLK_EH_DONE;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[index].cmd == scmd) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /*
+ * Bypass SCSI error handling and reset the block layer timer if this
+ * SCSI command was not actually dispatched to UFS driver, otherwise
+ * let SCSI layer handle the error as usual.
+ */
+ return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+}
+
+static const struct attribute_group *ufshcd_driver_groups[] = {
+ &ufs_sysfs_unit_descriptor_group,
+ &ufs_sysfs_lun_attributes_group,
+ NULL,
+};
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_configure = ufshcd_slave_configure,
+ .slave_destroy = ufshcd_slave_destroy,
+ .change_queue_depth = ufshcd_change_queue_depth,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+ .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .eh_timed_out = ufshcd_eh_timed_out,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+ .max_host_blocked = 1,
+ .track_queue_depth = 1,
+ .sdev_groups = ufshcd_driver_groups,
+ .no_write_same = 1,
+};
+
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ int ua)
+{
+ int ret;
+
+ if (!vreg)
+ return 0;
+
+ ret = regulator_set_load(vreg->reg, ua);
+ if (ret < 0) {
+ dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
+ __func__, vreg->name, ua, ret);
+ }
+
+ return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg,
+ UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg;
+ const char *name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
+ reg = vreg->reg;
+ name = vreg->name;
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = ufshcd_config_vreg(dev, vreg, true);
+ if (!ret)
+ ret = regulator_enable(vreg->reg);
+
+ if (!ret)
+ vreg->enabled = true;
+ else
+ dev_err(dev, "%s: %s enable failed, err=%d\n",
+ __func__, vreg->name, ret);
+out:
+ return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (!vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufshcd_config_vreg(dev, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+ if (ret)
+ goto out;
+
+out:
+ if (ret) {
+ ufshcd_toggle_vreg(dev, info->vccq2, false);
+ ufshcd_toggle_vreg(dev, info->vccq, false);
+ ufshcd_toggle_vreg(dev, info->vcc, false);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+ return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vcc);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+ return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+ return 0;
+}
+
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+ int ret = 0;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+ else if (!info->vccq)
+ goto out;
+
+ if (unused) {
+ /* shut off the rail here */
+ ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+ /*
+ * Mark this rail as no longer used, so it doesn't get enabled
+ * later by mistake
+ */
+ if (!ret)
+ info->vccq->unused = true;
+ } else {
+ /*
+ * rail should have been already enabled hence just make sure
+ * that unused flag is cleared.
+ */
+ info->vccq->unused = false;
+ }
+out:
+ return ret;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long flags;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ continue;
+
+ clk_state_changed = on ^ clki->enabled;
+ if (on && !clki->enabled) {
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+ } else if (!on && clki->enabled) {
+ clk_disable_unprepare(clki->clk);
+ }
+ clki->enabled = on;
+ dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+ clki->name, on ? "en" : "dis");
+ }
+ }
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
+
+out:
+ if (ret) {
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ clk_disable_unprepare(clki->clk);
+ }
+ } else if (!ret && on) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ (on ? "on" : "off"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ return __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct device *dev = hba->dev;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ }
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->vops)
+ goto out;
+
+ err = ufshcd_vops_init(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_vops_setup_regulators(hba, true);
+ if (err)
+ goto out_exit;
+
+ goto out;
+
+out_exit:
+ ufshcd_vops_exit(hba);
+out:
+ if (err)
+ dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+ __func__, ufshcd_get_var_name(hba), err);
+ return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+ if (!hba->vops)
+ return;
+
+ ufshcd_vops_setup_regulators(hba, false);
+
+ ufshcd_vops_exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+ int err;
+
+ /*
+ * Handle host controller power separately from the UFS device power
+ * rails, as it helps control UFS host controller power collapse
+ * independently, which is different from UFS device power collapse.
+ * Also, enable the host controller power before going ahead with the
+ * rest of the initialization here.
+ */
+ err = ufshcd_init_hba_vreg(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_setup_hba_vreg(hba, true);
+ if (err)
+ goto out;
+
+ err = ufshcd_init_clocks(hba);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_setup_clocks(hba, true);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_init_vreg(hba);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_setup_vreg(hba, true);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_variant_hba_init(hba);
+ if (err)
+ goto out_disable_vreg;
+
+ hba->is_powered = true;
+ goto out;
+
+out_disable_vreg:
+ ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+ ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+ ufshcd_setup_hba_vreg(hba, false);
+out:
+ return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+ if (hba->is_powered) {
+ ufshcd_variant_hba_exit(hba);
+ ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ }
+ ufshcd_setup_clocks(hba, false);
+ ufshcd_setup_hba_vreg(hba, false);
+ hba->is_powered = false;
+ }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+ unsigned char cmd[6] = {REQUEST_SENSE,
+ 0,
+ 0,
+ 0,
+ UFSHCD_REQ_SENSE_SIZE,
+ 0};
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+ msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
+ if (ret)
+ pr_err("%s: failed with err %d\n", __func__, ret);
+
+ kfree(buffer);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ * power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode)
+{
+ unsigned char cmd[6] = { START_STOP };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ sdp = hba->sdev_ufs_device;
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If scsi commands fail, the scsi mid-layer schedules scsi error-
+ * handling, which would wait for host to be resumed. Since we know
+ * we are functional while we are here, skip host resume in error
+ * handling context.
+ */
+ hba->host->eh_noresume = 1;
+ if (hba->wlun_dev_clr_ua) {
+ ret = ufshcd_send_request_sense(hba, sdp);
+ if (ret)
+ goto out;
+ /* Unit attention condition is cleared now */
+ hba->wlun_dev_clr_ua = false;
+ }
+
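+ /*
+ * POWER CONDITION is carried in bits 7:4 of CDB byte 4 of the
+ * START STOP UNIT command, hence the shift by 4.
+ */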
+ cmd[4] = pwr_mode << 4;
+
+ /*
+ * This function is generally called from the power management
+ * callbacks, hence set the RQF_PM flag so that it doesn't resume the
+ * already suspended children.
+ */
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d, result %x\n",
+ pwr_mode, ret);
+ if (driver_byte(ret) & DRIVER_SENSE)
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+out:
+ scsi_device_put(sdp);
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ ufshcd_set_link_hibern8(hba);
+ else
+ goto out;
+ }
+ /*
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ /*
+ * Make sure that the link is in low power mode; we currently do
+ * this by putting the link in Hibern8. Another way to put the
+ * link in low power mode is to send a DME end point reset to the
+ * device and then send the DME reset command to the local
+ * UniPro, but putting the link in Hibern8 is much faster.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ goto out;
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba, true);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ /*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
+ * To avoid this situation, add 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba) &&
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+ usleep_range(2000, 2100);
+
+ /*
+ * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+ * save some power.
+ *
+ * If the UFS device and link are in the OFF state, all power supplies
+ * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is not
+ * required. If the UFS link is inactive (Hibern8 or OFF state) and the
+ * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
+ * in low power state which would save some power.
+ */
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ufshcd_setup_vreg(hba, false);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (!ufshcd_is_link_active(hba)) {
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ }
+ }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ret = ufshcd_setup_vreg(hba, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ if (!ret && !ufshcd_is_link_active(hba)) {
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vccq_lpm;
+ }
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ }
+ goto out;
+
+vccq_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+ return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, true);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both UFS device and UFS link is powered off.
+ *
+ * NOTE: UFS device & link must be active before we enter in this function.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret = 0;
+ enum ufs_pm_level pm_lvl;
+ enum ufs_dev_pwr_mode req_dev_pwr_mode;
+ enum uic_link_state req_link_state;
+
+ hba->pm_op_in_progress = 1;
+ if (!ufshcd_is_shutdown_pm(pm_op)) {
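+ /*
+ * Each PM level encodes a target (device power mode, link state)
+ * pair; shutdown always maps to device powered down and link off.
+ */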
+ pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->rpm_lvl : hba->spm_lvl;
+ req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+ req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+ } else {
+ req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+ req_link_state = UIC_LINK_OFF_STATE;
+ }
+
+ /*
+ * If we can't transition into any of the low power modes
+ * just gate the clocks.
+ */
+ ufshcd_hold(hba, false);
+ hba->clk_gating.is_suspended = true;
+
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
+
+ if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ req_link_state == UIC_LINK_ACTIVE_STATE) {
+ goto disable_clks;
+ }
+
+ if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+ (req_link_state == hba->uic_link_state))
+ goto enable_gating;
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+ ret = -EINVAL;
+ goto enable_gating;
+ }
+
+ if (ufshcd_is_runtime_pm(pm_op)) {
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue;
+ * allow background operations if the bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
+ }
+
+ if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+ ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op))) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
+
+ ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ if (ret)
+ goto set_dev_active;
+
+ ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+ /*
+ * Call vendor specific suspend callback. As these callbacks may access
+ * vendor specific host controller register space call them before the
+ * host clocks are ON.
+ */
+ ret = ufshcd_vops_suspend(hba, pm_op);
+ if (ret)
+ goto set_link_active;
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ goto out;
+
+set_link_active:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ ufshcd_vreg_set_hpm(hba);
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ ufshcd_set_link_active(hba);
+ else if (ufshcd_is_link_off(hba))
+ ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+ if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ ufshcd_disable_auto_bkops(hba);
+enable_gating:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret;
+ enum uic_link_state old_link_state;
+
+ hba->pm_op_in_progress = 1;
+ old_link_state = hba->uic_link_state;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto out;
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ /*
+ * Call vendor specific resume callback. As these callbacks may access
+ * vendor specific host controller register space call them when the
+ * host clocks are ON.
+ */
+ ret = ufshcd_vops_resume(hba, pm_op);
+ if (ret)
+ goto disable_vreg;
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (!ret)
+ ufshcd_set_link_active(hba);
+ else
+ goto vendor_suspend;
+ } else if (ufshcd_is_link_off(hba)) {
+ ret = ufshcd_host_reset_and_restore(hba);
+ /*
+ * ufshcd_host_reset_and_restore() should have already
+ * set the link state as active
+ */
+ if (ret || !ufshcd_is_link_active(hba))
+ goto vendor_suspend;
+ }
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ if (ret)
+ goto set_old_link_state;
+ }
+
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+ ufshcd_enable_auto_bkops(hba);
+ else
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+
+ hba->clk_gating.is_suspended = false;
+
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ goto out;
+
+set_old_link_state:
+ ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+ ufshcd_vops_suspend(hba, pm_op);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+ ufshcd_disable_irq(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_setup_clocks(hba, false);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba || !hba->is_powered)
+ return 0;
+
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+		/*
+		 * UFS device and/or UFS link low power states during runtime
+		 * suspend seem to be different from what is expected during
+		 * system suspend. Hence runtime resume the device & link and
+		 * let the system suspend low power states take effect.
+		 * TODO: If resume takes a long time, we might optimize it in
+		 * the future by not resuming everything if possible.
+		 */
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = true;
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
+ /*
+ * Let the runtime resume take care of resuming
+ * if runtime suspended.
+ */
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_resume);
+
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. The following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ * to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * The following would be the possible power state after this function returns
+ * successfully:
+ * S1: UFS device in Active state with VCC rail ON
+ * UniPro link in Active state
+ * All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
+/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function powers off both the UFS device and the UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/**
+ * ufshcd_remove - de-allocate SCSI host and host memory space
+ * data structures
+ * @hba: per adapter instance
+ */
+void ufshcd_remove(struct ufs_hba *hba)
+{
+ ufs_sysfs_remove_nodes(hba->dev);
+ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba, true);
+
+ ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_hba_exit(hba);
+}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
+
+/**
+ * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+ * @hba: pointer to Host Bus Adapter (HBA)
+ */
+void ufshcd_dealloc_host(struct ufs_hba *hba)
+{
+ scsi_host_put(hba->host);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
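+	/* Prefer 64-bit DMA when the controller advertises it; otherwise fall back to 32-bit */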
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ return 0;
+ }
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
+/**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err = 0;
+
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_error;
+ }
+ hba = shost_priv(host);
+ hba->host = host;
+ hba->dev = dev;
+ *hba_handle = hba;
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+ int err;
+ struct Scsi_Host *host = hba->host;
+ struct device *dev = hba->dev;
+
+ if (!mmio_base) {
+ dev_err(hba->dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
+ /* Get Interrupt bit mask per version */
+ hba->intr_mask = ufshcd_get_intr_mask(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(hba->dev, "set dma mask failed\n");
+ goto out_disable;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFS_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+ hba->max_pwr_info.is_valid = false;
+
+	/* Initialize wait queue for task management */
+ init_waitqueue_head(&hba->tm_wq);
+ init_waitqueue_head(&hba->tm_tag_wq);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+
+ /* Initialize UIC command mutex */
+ mutex_init(&hba->uic_cmd_mutex);
+
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ init_rwsem(&hba->clk_scaling_lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
+ ufshcd_init_clk_gating(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+ /*
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+ mb();
+
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(hba->dev, "request irq failed\n");
+ goto exit_gating;
+ } else {
+ hba->is_irq_enabled = true;
+ }
+
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto exit_gating;
+ }
+
+ /* Host controller enable */
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ goto out_remove_scsi_host;
+ }
+
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+ }
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+	/* Set the default auto-hibernate idle timer value to 150 ms */
+ if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+ }
+
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
+ /*
+ * We are assuming that device wasn't put in sleep/power-down
+ * state exclusively during the boot stage before kernel.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
+ */
+ ufshcd_set_ufs_dev_active(hba);
+
+ async_schedule(ufshcd_async_scan, hba);
+ ufs_sysfs_add_nodes(hba->dev);
+
+ return 0;
+
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
+exit_gating:
+ ufshcd_exit_clk_gating(hba);
+out_disable:
+ hba->is_irq_enabled = false;
+ ufshcd_hba_exit(hba);
+out_error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_init);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage.1 b/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage.1
new file mode 100644
index 0000000..2d89e6b
--- /dev/null
+++ b/rr-cache/03b6e25b31cdeaefcf332323934d7e98f96d97d2/preimage.1
@@ -0,0 +1,8142 @@
+/*
+ * Universal Flash Storage Host controller driver Core
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#include <linux/async.h>
+#include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+#include "unipro.h"
+#include "ufs-sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#define UFSHCD_REQ_SENSE_SIZE 18
+
+#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
+ UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK)
+/* UIC command timeout, unit: ms */
+#define UIC_CMD_TIMEOUT 500
+
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 3
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
+#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
+ ({ \
+ int _ret; \
+ if (_on) \
+ _ret = ufshcd_enable_vreg(_dev, _vreg); \
+ else \
+ _ret = ufshcd_disable_vreg(_dev, _vreg); \
+ _ret; \
+ })
+
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
+};
+
+/* UFSHCD error handling flags */
+enum {
+ UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+ UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
+};
+
+#define ufshcd_set_eh_in_progress(h) \
+ ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+ ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+ ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].link_state;
+}
+
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ enum uic_link_state link_state)
+{
+ enum ufs_pm_level lvl;
+
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+ if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+ (ufs_pm_lvl_states[lvl].link_state == link_state))
+ return lvl;
+ }
+
+ /* if no match found, return the level 0 */
+ return UFS_PM_LVL_0;
+}
+
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+ return tag >= 0 && tag < hba->nutrs;
+}
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+ int ret = 0;
+
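+	/* Register the shared interrupt handler only if it is not already installed */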
+ if (!hba->is_irq_enabled) {
+ ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+ hba);
+ if (ret)
+ dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+ __func__, ret);
+ hba->is_irq_enabled = true;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+ if (hba->is_irq_enabled) {
+ free_irq(hba->irq, hba);
+ hba->is_irq_enabled = false;
+ }
+}
+
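+/* Reference-counted wrappers around scsi_block_requests()/scsi_unblock_requests() */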
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+ scsi_block_requests(hba->host);
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+ if (!val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_task_req_desc *descp;
+ struct utp_upiu_task_req *task_req;
+ int off = (int)tag - hba->nutrs;
+
+ descp = &hba->utmrdl_base_addr[off];
+ task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+ &task_req->input_param1);
+}
+
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+ sector_t lba = -1;
+ u8 opcode = 0;
+ u32 intr, doorbell;
+ struct ufshcd_lrb *lrbp;
+ int transfer_len = -1;
+
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
+ if (!trace_ufshcd_command_enabled())
+ return;
+
+ lrbp = &hba->lrb[tag];
+
+ if (lrbp->cmd) { /* data phase exists */
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba =
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+
+ intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ doorbell, transfer_len, intr, lba, opcode);
+}
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+ clki->max_freq)
+ dev_err(hba->dev, "clk: %s, rate: %u\n",
+ clki->name, clki->curr_freq);
+ }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+ struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+ int i;
+
+ for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+ int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+ if (err_hist->reg[p] == 0)
+ continue;
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+ err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ }
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ /*
+	 * hex_dump reads its data without the readl macro. This might
+	 * cause inconsistency issues on some platforms, as the printed
+	 * values may be from cache and not the most recent value.
+	 * To know whether you are looking at an un-cached version, verify
+	 * that the IORESOURCE_MEM flag is on when xxx_get_resource() is
+	 * invoked during the platform/pci probe function.
+ */
+ ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
+ hba->ufs_version, hba->capabilities);
+ dev_err(hba->dev,
+ "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
+ (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+ ufshcd_print_clk_freqs(hba);
+
+ if (hba->vops && hba->vops->dbg_register_dump)
+ hba->vops->dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+ struct ufshcd_lrb *lrbp;
+ int prdt_length;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
+ tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
+ tag, ktime_to_us(lrbp->compl_time_stamp));
+ dev_err(hba->dev,
+ "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
+ tag, (u64)lrbp->utrd_dma_addr);
+
+ ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+ sizeof(struct utp_transfer_req_desc));
+ dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_req_dma_addr);
+ ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_rsp_dma_addr);
+ ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+ sizeof(struct utp_upiu_rsp));
+
+ prdt_length = le16_to_cpu(
+ lrbp->utr_descriptor_ptr->prd_table_length);
+ dev_err(hba->dev,
+ "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
+ tag, prdt_length,
+ (u64)lrbp->ucd_prdt_dma_addr);
+
+ if (pr_prdt)
+ ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+ sizeof(struct ufshcd_sg_entry) * prdt_length);
+ }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct utp_task_req_desc *tmrdp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+ tmrdp = &hba->utmrdl_base_addr[tag];
+ dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
+ ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+ sizeof(struct request_desc_header));
+ dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+ sizeof(struct utp_task_req_desc));
+ }
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+ dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+ dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ hba->saved_err, hba->saved_uic_err);
+ dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+ hba->pm_op_in_progress, hba->is_sys_suspended);
+ dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+ hba->auto_bkops_enabled, hba->host->host_self_blocked);
+ dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+ hba->eh_flags, hba->req_abort_count);
+ dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+ hba->capabilities, hba->caps);
+ dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+ hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ static const char * const names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ __func__,
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
+/**
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to read register value
+ * @val: wait condition
+ * @interval_us: polling interval in microsecs
+ * @timeout_ms: timeout in millisecs
+ * @can_sleep: perform sleep or just spin
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ if (can_sleep)
+ usleep_range(interval_us, interval_us + 50);
+ else
+ udelay(interval_us);
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ * @hba: Pointer to adapter instance
+ *
+ * Returns interrupt bit mask per version
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ break;
+ }
+
+ return intr_mask;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
+ return ufshcd_vops_get_ufs_hci_version(hba);
+
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device connected to
+ * the host controller
+ * @hba: pointer to adapter instance
+ *
+ * Returns true if device present, false if no device detected
+ */
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? true : false;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
+ *
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns false if a free slot is not available, else returns true with the
+ * tag value in @free_slot.
+ */
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+{
+ int tag;
+ bool ret = false;
+
+ if (!free_slot)
+ goto out;
+
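+	/* Find a free slot and atomically claim it; retry if another context races us */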
+ do {
+ tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+ if (tag >= hba->nutmrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+ *free_slot = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+ clear_bit_unlock(slot, &hba->tm_slots_in_use);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos),
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns 0 on success and a positive value on failure
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function reads UIC command argument3.
+ * Returns the value of UIC command argument3.
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ * When the run-stop registers are set to 1, it indicates to the
+ * host controller that it can process requests.
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP);
+ ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns false if controller is active, true otherwise
+ */
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+ ? false : true;
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+ /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ return UFS_UNIPRO_VER_1_41;
+ else
+ return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+ /*
+ * If both host and device support UniPro ver1.6 or later, PA layer
+ * parameters tuning happens during link startup itself.
+ *
+ * We can manually tune PA layer parameters if either host or device
+ * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+ * logic simple, we will only do manual tuning if local unipro version
+ * doesn't support ver1.6 or later.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+ return true;
+ else
+ return false;
+}
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled up", clki->name,
+ clki->curr_freq,
+ clki->max_freq);
+
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled down", clki->name,
+ clki->curr_freq,
+ clki->min_freq);
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+ bool scale_up)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return false;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ return true;
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+ u64 wait_timeout_us)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 tm_doorbell;
+ u32 tr_doorbell;
+ bool timeout = false, do_last_check = false;
+ ktime_t start;
+
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Wait for all the outstanding tasks/transfer requests.
+ * Verify by checking the doorbell registers are clear.
+ */
+ start = ktime_get();
+ do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!tm_doorbell && !tr_doorbell) {
+ timeout = false;
+ break;
+ } else if (do_last_check) {
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+ wait_timeout_us) {
+ timeout = true;
+ /*
+ * We might have scheduled out for long time so make
+ * sure to check if doorbells are cleared by this time
+ * or not.
+ */
+ do_last_check = true;
+ }
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (tm_doorbell || tr_doorbell);
+
+ if (timeout) {
+ dev_err(hba->dev,
+ "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+ __func__, tm_doorbell, tr_doorbell);
+ ret = -EBUSY;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+ #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
+ int ret = 0;
+ struct ufs_pa_layer_attr new_pwr_info;
+
+ if (scale_up) {
+ memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+ sizeof(struct ufs_pa_layer_attr));
+ } else {
+ memcpy(&new_pwr_info, &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
+ || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ /* save the current power mode */
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ /* scale down gear */
+ new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ }
+ }
+
+ /* check if the power mode needs to be changed or not? */
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+ if (ret)
+ dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+ __func__, ret,
+ hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+ new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+ return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+ #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+ int ret = 0;
+ /*
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->clk_scaling_lock);
+ if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+ }
+
+ return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+ return ret;
+
+ /* scale down the gear before scaling down clocks */
+ if (!scale_up) {
+ ret = ufshcd_scale_gear(hba, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_scale_clks(hba, scale_up);
+ if (ret) {
+ if (!scale_up)
+ ufshcd_scale_gear(hba, true);
+ goto out;
+ }
+
+ /* scale up the gear after scaling up clocks */
+ if (scale_up) {
+ ret = ufshcd_scale_gear(hba, true);
+ if (ret) {
+ ufshcd_scale_clks(hba, false);
+ goto out;
+ }
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ ufshcd_clock_scaling_unprepare(hba);
+ ufshcd_release(hba);
+ return ret;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.suspend_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = true;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.resume_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (!hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ devfreq_resume_device(hba->devfreq);
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ ktime_t start;
+ bool scale_up, sched_clk_scaling_suspend_work = false;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ unsigned long irq_flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
+
+ if (list_empty(clk_list)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ goto out;
+ }
+
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ scale_up = (*freq == clki->max_freq) ? true : false;
+ if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ ret = 0;
+ goto out; /* no state change required */
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ start = ktime_get();
+ ret = ufshcd_devfreq_scale(hba, scale_up);
+
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+ if (sched_clk_scaling_suspend_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.suspend_work);
+
+ return ret;
+}
+
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
+static int ufshcd_devfreq_init(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct devfreq *devfreq;
+ int ret;
+
+ /* Skip devfreq if we don't have any clocks in the list */
+ if (list_empty(clk_list))
+ return 0;
+
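+	/* Register the min and max clock rates as OPPs for the devfreq governor to pick from */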
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+
+ devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+<<<<<<<
+ "simple_ondemand",
+=======
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+>>>>>>>
+ NULL);
+ if (IS_ERR(devfreq)) {
+ ret = PTR_ERR(devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
+
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ return ret;
+ }
+
+ hba->devfreq = devfreq;
+
+ return 0;
+}
+
+static void ufshcd_devfreq_remove(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ if (!hba->devfreq)
+ return;
+
+ devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+}
+
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ devfreq_suspend_device(hba->devfreq);
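+	/* Reset the monitoring window so busy-time stats start fresh on resume */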
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.window_start_t = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool suspend = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (suspend)
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool resume = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (resume)
+ devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_scaling.is_allowed)
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+
+ hba->clk_scaling.is_allowed = value;
+
+ if (value) {
+ ufshcd_resume_clkscaling(hba);
+ } else {
+ ufshcd_suspend_clkscaling(hba);
+ err = ufshcd_devfreq_scale(hba, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ }
+
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return count;
+}
+
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+ hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+ hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+ sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+ hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+ hba->clk_scaling.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
+
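+/**
+ * ufshcd_ungate_work - turn the clocks back on and bring the link out of
+ * hibern8 if it was entered during gating
+ * @work: pointer to the clk_gating.ungate_work member of struct ufs_hba
+ */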
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.ungate_work);
+
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == CLKS_ON) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_setup_clocks(hba, true);
+
+ /* Exit from hibern8 */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ /* Prevent gating in this path */
+ hba->clk_gating.is_suspended = true;
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ else
+ ufshcd_set_link_active(hba);
+ }
+ hba->clk_gating.is_suspended = false;
+ }
+unblock_reqs:
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
+start:
+ switch (hba->clk_gating.state) {
+ case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we also exit hibern8, in addition to the
+ * clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
+ break;
+ case REQ_CLKS_OFF:
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ break;
+ }
+ /*
+ * If we are here, it means gating work is either done or
+ * currently running. Hence, fall through to cancel gating
+ * work and to enable clocks.
+ */
+ case CLKS_OFF:
+ ufshcd_scsi_block_requests(hba);
+ hba->clk_gating.state = REQ_CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_CLKS_ON:
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ /* Make sure state is CLKS_ON before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ __func__, hba->clk_gating.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
+
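+/**
+ * ufshcd_gate_work - delayed work to put the link into hibern8 and gate
+ * the clocks if the host is still idle
+ * @work: pointer to the clk_gating.gate_work member of struct ufs_hba
+ */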
+static void ufshcd_gate_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.gate_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * If a request to turn the clocks back on arrived while this work was
+ * pending, the gating state will already be REQ_CLKS_ON. In that case
+ * save time by skipping the gating work and exit after setting the
+ * clock state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto rel_lock;
+ }
+
+ if (hba->clk_gating.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* put the link into hibern8 mode before turning off clocks */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ if (ufshcd_uic_hibern8_enter(hba)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ /*
+ * If a request to turn the clocks back on arrived meanwhile, the
+ * gating state will be REQ_CLKS_ON. In that case keep the state as
+ * REQ_CLKS_ON, which anyway implies that the clocks are off and a
+ * request to turn them on is pending. This keeps the state machine
+ * intact and ultimately prevents the cancel work from running
+ * multiple times when new requests arrive before the current cancel
+ * work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.active_reqs--;
+
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba))
+ return;
+
+ hba->clk_gating.state = REQ_CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ schedule_delayed_work(&hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
+
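+/* sysfs "clkgate_delay_ms": show/store the clock gating delay in milliseconds */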
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_gating.is_enabled)
+ goto out;
+
+ if (value) {
+ ufshcd_release(hba);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->clk_gating.is_enabled = value;
+out:
+ return count;
+}
+
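+/* Initialize clock gating: work items, ordered workqueue and sysfs attributes */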
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.delay_ms = 150;
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
+
+ hba->clk_gating.is_enabled = true;
+
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+}
+
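+/* Tear down clock gating: sysfs attributes, pending work and the workqueue */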
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ cancel_work_sync(&hba->clk_gating.ungate_work);
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+ bool queue_resume_work = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.active_reqs++)
+ queue_resume_work = true;
+
+ if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ return;
+
+ if (queue_resume_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.resume_work);
+
+ if (!hba->clk_scaling.window_start_t) {
+ hba->clk_scaling.window_start_t = jiffies;
+ hba->clk_scaling.tot_busy_t = 0;
+ hba->clk_scaling.is_busy_started = false;
+ }
+
+ if (!hba->clk_scaling.is_busy_started) {
+ hba->clk_scaling.busy_start_t = ktime_get();
+ hba->clk_scaling.is_busy_started = true;
+ }
+}
+
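+/* Fold the current busy interval into tot_busy_t once no requests are outstanding */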
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_clk_scaling_start_busy(hba);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+ ufshcd_add_command_trace(hba, task_tag, "send");
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
+ }
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static
+int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 resp_len;
+ u16 buf_len;
+
+ /* data segment length */
+ resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+ buf_len = be16_to_cpu(
+ hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_ready_for_uic_cmd - Check if controller is ready
+ * to accept UIC commands
+ * @hba: per adapter instance
+ * Return true on success, else false
+ */
+static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+{
+ if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of the HCS register.
+ * Returns the value of the UPMCRS field.
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Mutex must be held.
+ */
+static inline void
+ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ WARN_ON(hba->active_uic_cmd);
+
+ hba->active_uic_cmd = uic_cmd;
+
+ /* Write Args */
+ ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
+ ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
+ ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
+
+ /* Write UIC Cmd */
+ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
+ REG_UIC_COMMAND);
+}
+
+/**
+ * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Must be called with mutex held.
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ if (wait_for_completion_timeout(&uic_cmd->done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ else
+ ret = -ETIMEDOUT;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+/**
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
+ *
+ * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
+ * mutex itself. Must be called with the mutex held and host_lock locked.
+ * Returns 0 only if success.
+ */
+static int
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
+{
+ if (!ufshcd_ready_for_uic_cmd(hba)) {
+ dev_err(hba->dev,
+ "Controller not ready to accept UIC commands\n");
+ return -EIO;
+ }
+
+ if (completion)
+ init_completion(&uic_cmd->done);
+
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+
+ return 0;
+}
+
+/**
+ * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = set & INTERRUPT_MASK_RW_VER_10;
+ set = rw | ((set ^ intrs) & intrs);
+ } else {
+ set |= intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = (set & INTERRUPT_MASK_RW_VER_10) &
+ ~(intrs & INTERRUPT_MASK_RW_VER_10);
+ set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
+
+ } else {
+ set &= ~intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fill the request header
+ * descriptor according to the request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: request's data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
+ * for SCSI commands
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+ /* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+
+ /* Copy the Descriptor */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(descp, query->descriptor, len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
+ * for Device Management Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
+ * for SCSI Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+ return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ if (!down_read_trylock(&hba->clk_scaling_lock))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED:
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+
+ /* if error handling is in progress, don't issue commands */
+ if (ufshcd_eh_in_progress(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ hba->req_abort_count = 0;
+
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * A device management command is in progress, requeue the command.
+ * Requeuing helps in cases where the request *may* find a different
+ * tag instead of waiting for the device management command to
+ * complete.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ err = ufshcd_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+ lrbp = &hba->lrb[tag];
+
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+
+ err = ufshcd_map_sg(hba, lrbp);
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
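+/* Compose a local reference block for a device management command (no SCSI cmd) */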
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_comp_devman_upiu(hba, lrbp);
+}
+
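+/*
+ * Clear the given transfer request slot in the UTRL and wait (up to 1 second)
+ * for the controller to clear the corresponding doorbell bit.
+ */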
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * wait for h/w to clear the corresponding bit in the doorbell.
+ * max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000, true);
+
+ return err;
+}
+
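+/* Extract the query response result code from the response UPIU */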
+static int
+ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ err = ufshcd_check_query_response(hba, lrbp);
+ if (!err)
+ err = ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
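+/*
+ * Wait up to @max_timeout milliseconds for a device management command to
+ * complete. Returns -ETIMEDOUT on timeout, or -EAGAIN if the timed-out
+ * command could be cleared and may be retried.
+ */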
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* successfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ /*
+ * in case of an error, after clearing the doorbell,
+ * we also need to clear the outstanding_request
+ * field in hba
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if a free slot is unavailable for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba: UFS hba
+ * @cmd_type: specifies the type (NOP, Query...)
+ * @timeout: timeout in milliseconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * it is expected that the caller holds the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, tag,
+ err ? "query_complete_err" : "query_complete");
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request, struct ufs_query_res **response,
+ enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
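+/* Retry wrapper around ufshcd_query_flag(), up to QUERY_REQ_RETRIES attempts */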
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ *attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retries\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
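+/*
+ * __ufshcd_query_descriptor - send a single descriptor read/write query.
+ * @buf_len carries the buffer size on input and the length returned by
+ * the device on output.
+ */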
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_HEALTH:
+ *desc_len = hba->desc_size.hlth_desc;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 param_offset,
+ u8 *param_read_buf,
+ u8 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ int buff_len;
+ bool is_kmalloc = true;
+
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+ /* Get the max length of descriptor from structure filled up at probe
+ * time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ }
+
+ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
+ goto out;
+ }
+
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check that we do not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba,
+ QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ char *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The descriptor contains a string in UTF-16 format; convert it
+ * to UTF-8 so it can be displayed.
+ */
+ utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+ desc_len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ int lun,
+ enum unit_desc_param param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ /*
+ * Unit descriptors are only available for general purpose LUs (LUN id
+ * from 0 to 7) and RPMB Well known LU.
+ */
+ if (!ufs_is_valid_unit_desc_lun(lun))
+ return -EOPNOTSUPP;
+
+ return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+ param_offset, param_read_buf, param_size);
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+ /*
+ * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
+ * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
+ * then it will be aligned to 128 bytes as well.
+ */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = devm_kcalloc(hba->dev,
+ hba->nutrs, sizeof(struct ufshcd_lrb),
+ GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+ /* Response upiu and prdt offset should be in double words */
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+ (i * sizeof(struct utp_transfer_req_desc));
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+ response_offset;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+ prdt_offset;
+ }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev,
+ "dme-link-startup: error code %d\n", ret);
+ return ret;
+}
+
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset UniPro stack.
+ * This function now deals with cold reset.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for 1st call to
+ * this function
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+ struct ufs_pa_layer_attr orig_pwr_info;
+ struct ufs_pa_layer_attr temp_pwr_info;
+ bool pwr_mode_change = false;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+ orig_pwr_info = hba->pwr_info;
+ temp_pwr_info = orig_pwr_info;
+
+ if (orig_pwr_info.pwr_tx == FAST_MODE ||
+ orig_pwr_info.pwr_rx == FAST_MODE) {
+ temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+ temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+ pwr_mode_change = true;
+ } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+ orig_pwr_info.pwr_rx == SLOW_MODE) {
+ temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+ temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+ pwr_mode_change = true;
+ }
+ if (pwr_mode_change) {
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ if (ret)
+ goto out;
+ }
+ }
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
+ *mib_val = uic_cmd.argument3;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+ && pwr_mode_change)
+ ufshcd_change_power_mode(hba, &orig_pwr_info);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
+ * state) and waits for them to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT take some time to take effect on both the host and
+ * device UniPro link, hence their final completion is indicated by dedicated
+ * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in addition
+ * to the normal UIC command completion status (UCCS). This function only
+ * returns after the relevant status bits indicate the completion.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ struct completion uic_async_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+ bool reenable_intr = false;
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->uic_async_done = &uic_async_done;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
+ }
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->uic_async_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ if (ret) {
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+out:
+ return ret;
+}
+
+static int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
+ struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+
+ /*
+ * If link recovery fails then return an error so that the
+ * caller doesn't retry the hibern8 enter again.
+ */
+ if (ufshcd_link_recovery(hba))
+ ret = -ENOLINK;
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
+
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
+
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret || ret == -ENOLINK)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
+ } else {
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.hibern8_exit_cnt++;
+ }
+
+ return ret;
+}
+
+static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+ hba->pwr_info.gear_rx = UFS_PWM_G1;
+ hba->pwr_info.gear_tx = UFS_PWM_G1;
+ hba->pwr_info.lane_rx = 1;
+ hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+ hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+ hba->pwr_info.hs_rate = 0;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOW_MODE;
+ }
+
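+ /*
+ * The peer (device) RX gear capability bounds the host TX gear, so
+ * the peer's maximum RX gear is read to derive gear_tx.
+ */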
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOW_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
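+ /*
+ * PA_PWRMODE packs the RX power mode into the upper nibble and the
+ * TX power mode into the lower nibble.
+ */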
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: power mode change failed %d\n", __func__, ret);
+ } else {
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ pwr_mode);
+
+ memcpy(&hba->pwr_info, pwr_mode,
+ sizeof(struct ufs_pa_layer_attr));
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ struct ufs_pa_layer_attr final_params = { 0 };
+ int ret;
+
+ ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
+ desired_pwr_mode, &final_params);
+
+ if (ret)
+ memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+ ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_print_pwr_info(hba);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set the fDeviceInit flag and poll until the device clears it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i;
+ int err;
+ bool flag_res = true;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* Enable required interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+ /* Configure interrupt aggregation */
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+ else
+ ufshcd_disable_intr_aggr(hba);
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ /*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
+ * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+ */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(hba->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10, 1, can_sleep);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
+ * ufshcd_hba_execute_hce - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+{
+ int retry;
+
+ /*
+ * msleep of 1 and 5 used in this function might result in msleep(20),
+ * but it was necessary to send the UFS FPGA to reset mode during
+ * development and testing of this driver. msleep can be changed to
+ * mdelay and retry count can be reduced based on the controller.
+ */
+ if (!ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba, true);
+
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
+
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller, the HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction, might be read back.
+ * This delay can be changed based on the controller.
+ */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(hba->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+
+ return 0;
+}
+
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
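+ /*
+ * Controllers with a broken HCE implementation are brought up via
+ * DME_RESET followed by DME_ENABLE instead of toggling the HCE bit.
+ */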
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
+ ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+ ret = ufshcd_dme_reset(hba);
+ if (!ret) {
+ ret = ufshcd_dme_enable(hba);
+ if (!ret)
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+ if (ret)
+ dev_err(hba->dev,
+ "Host controller enable failed with non-hce\n");
+ }
+ } else {
+ ret = ufshcd_hba_execute_hce(hba);
+ }
+
+ return ret;
+}
+
+static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
+{
+ int tx_lanes, i, err = 0;
+
+ if (!peer)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ else
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ for (i = 0; i < tx_lanes; i++) {
+ if (!peer)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ else
+ err = ufshcd_dme_peer_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ if (err) {
+ dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
+ __func__, peer, i, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, true);
+}
+
+/**
+ * ufshcd_link_startup - Initialize unipro link startup
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_link_startup(struct ufs_hba *hba)
+{
+ int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If UFS device isn't active then we will have to issue link startup
+ * 2 times to make sure the device state move to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+
+link_startup:
+ do {
+ ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
+
+ if (ret)
+ /* failed to get the link up... retire */
+ goto out;
+
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+ ufshcd_print_pwr_info(hba);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_device_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
+ /* Include any host controller configuration via UIC commands */
+ ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_make_hba_operational(hba);
+out:
+ if (ret) {
+ dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+ return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
+ * value that the host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+ int ret = 0;
+ u8 lun_qdepth;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ lun_qdepth = hba->nutrs;
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+
+ /* Some WLUNs don't support unit descriptors */
+ if (ret == -EOPNOTSUPP)
+ lun_qdepth = 1;
+ else if (!lun_qdepth)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_change_queue_depth(sdev, lun_qdepth);
+}
+
+/**
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success, with the b_lu_write_protect status returned
+ * in the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+ u8 lun,
+ u8 *b_lu_write_protect)
+{
+ int ret;
+
+ if (!b_lu_write_protect)
+ ret = -EINVAL;
+ /*
+ * According to UFS device spec, RPMB LU can't be write
+ * protected so skip reading bLUWriteProtect parameter for
+ * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+ */
+ else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ ret = -ENOTSUPP;
+ else
+ ret = ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_LU_WR_PROTECT,
+ b_lu_write_protect,
+ sizeof(*b_lu_write_protect));
+ return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ if (hba->dev_info.f_power_on_wp_en &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ u8 b_lu_write_protect;
+
+ if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+ &b_lu_write_protect) &&
+ (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+ hba->dev_info.is_lu_power_on_wp = true;
+ }
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+
+ /* allow SCSI layer to restart the device in case of errors */
+ sdev->allow_restart = 1;
+
+ /* REPORT SUPPORTED OPERATION CODES is not supported */
+ sdev->no_report_opcodes = 1;
+
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
+
+ ufshcd_set_queue_depth(sdev);
+
+ ufshcd_get_lu_power_on_wp_status(hba, sdev);
+
+ return 0;
+}
+
+/**
+ * ufshcd_change_queue_depth - change queue depth
+ * @sdev: pointer to SCSI device
+ * @depth: required depth to set
+ *
+ * Change queue depth and make sure the max. limits are not crossed.
+ */
+static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ if (depth > hba->nutrs)
+ depth = hba->nutrs;
+ return scsi_change_queue_depth(sdev, depth);
+}
+
+/**
+ * ufshcd_slave_configure - adjust SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static int ufshcd_slave_configure(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+
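+ /*
+ * Pad transfer lengths to the PRDT data byte count granularity and
+ * cap scatter-gather segments at the per-entry PRDT limit.
+ */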
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ /* Drop the reference as it won't be needed anymore */
+ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->sdev_ufs_device = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ * @resp: task management service response
+ *
+ * Returns non-zero value on error, zero on success
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->output_param1);
+ task_result = task_result & MASK_TM_SERVICE_RESP;
+ if (resp)
+ *resp = (u8)task_result;
+ } else {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ocs_value;
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrbp: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
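+ /* fall through - report the check condition along with DID_OK */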
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ scsi_status;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_ABORTED:
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ /*
+ * Currently we are only supporting BKOPs exception
+ * events hence we can ignore BKOPs exception event
+ * during power management callbacks. BKOPs exception
+ * event is not expected to be raised in runtime suspend
+ * callback as it allows the urgent bkops.
+ * During system suspend, we are anyway forcefully
+ * disabling the bkops and if urgent bkops is needed
+ * it will be enabled on system resume. Long term
+ * solution could be to abort the system suspend if
+ * UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+ dev_err(hba->dev,
+ "Unexpected request response code = %x\n",
+ result);
+ result = DID_ERROR << 16;
+ break;
+ }
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_COMMAND_STATUS:
+ result |= DID_REQUEUE << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(hba->dev,
+ "OCS error from controller = %x for tag %d\n",
+ ocs, lrbp->task_tag);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ break;
+ } /* end of switch */
+
+ if (host_byte(result) != DID_OK)
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
+ return result;
+}
+
+/**
+ * ufshcd_uic_cmd_compl - handle completion of uic command
+ * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
+ */
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+{
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
+ hba->active_uic_cmd->argument2 |=
+ ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
+ complete(&hba->active_uic_cmd->done);
+ }
+
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ complete(hba->uic_async_done);
+}
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ int result;
+ int index;
+
+ for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ ufshcd_add_command_trace(hba, index, "complete");
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ __ufshcd_release(hba);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ ufshcd_add_command_trace(hba, index,
+ "dev_complete");
+ complete(hba->dev_cmd.complete);
+ }
+ }
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
+
+ lrbp->compl_time_stamp = ktime_get();
+ }
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ ufshcd_clk_scaling_update_busy(hba);
+
+ /* we might have free'd some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent starvation of other interrupts the DB is read once
+ * after reset. The downside of this solution is the possibility of a
+ * false interrupt if the device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ if (ufshcd_is_intr_aggr_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
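+ /*
+ * Bits set in outstanding_reqs but already cleared in the doorbell
+ * identify the completed requests.
+ */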
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+
+ /* No need for the URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block the device from doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback of the device moving into a critical state where it is
+ * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. This function would change the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+ } else {
+ hba->auto_bkops_enabled = true;
+ hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+ ufshcd_disable_auto_bkops(hba);
+ }
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to "status" argument passed to
+ * this function, disable otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+ enum bkops_status status)
+{
+ int err;
+ u32 curr_status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ } else if (curr_status > BKOPS_STATUS_MAX) {
+ dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+ __func__, curr_status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (curr_status >= status)
+ err = ufshcd_enable_auto_bkops(hba);
+ else
+ err = ufshcd_disable_auto_bkops(hba);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * Returns 0 if BKOPs is enabled, 1 if BKOPs is not enabled and a negative
+ * error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+ int err;
+ u32 curr_status = 0;
+
+ if (hba->is_urgent_bkops_lvl_checked)
+ goto enable_auto_bkops;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * We are seeing that some devices are raising the urgent bkops
+ * exception events even when BKOPS status doesn't indicate performance
+ * impacted or critical. Handle these devices by determining their urgent
+ * bkops status at runtime.
+ */
+ if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+ dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+ __func__, curr_status);
+ /* update the current status as the urgent bkops level */
+ hba->urgent_bkops_lvl = curr_status;
+ hba->is_urgent_bkops_lvl_checked = true;
+ }
+
+enable_auto_bkops:
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+
+ if (status & MASK_EE_URGENT_BKOPS)
+ ufshcd_bkops_exception_event_handler(hba);
+
+out:
+ scsi_unblock_requests(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ return;
+}
+
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - Checks whether error handling is required
+ * to recover from the DL NAC errors.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool err_handling = true;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
+ * device fatal error and/or DL NAC & REPLAY timeout errors.
+ */
+ if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+ goto out;
+
+ if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+ goto out;
+
+ if ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+ int err;
+ /*
+ * wait for 50ms to see if we can get any other errors or not.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ msleep(50);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /*
+ * Now check whether we have received any other severe errors
+ * besides the DL NAC error.
+ */
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+ goto out;
+
+ /*
+ * As DL NAC is the only error received so far, send out NOP
+ * command to confirm if link is still active or not.
+ * - If we don't get any response then do error recovery.
+ * - If we get response then clear the DL NAC error bit.
+ */
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_verify_dev_init(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (err)
+ goto out;
+
+ /* Link seems to be alive hence ignore the DL NAC errors */
+ if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+ hba->saved_err &= ~UIC_ERROR;
+ /* clear NAC error */
+ hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ if (!hba->saved_uic_err) {
+ err_handling = false;
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err_handling;
+}
+
+/**
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
+ */
+static void ufshcd_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ unsigned long flags;
+ u32 err_xfer = 0;
+ u32 err_tm = 0;
+ int err = 0;
+ int tag;
+ bool needs_reset = false;
+
+ hba = container_of(work, struct ufs_hba, eh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ goto out;
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ bool ret;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+ ret = ufshcd_quirk_dl_nac_errors(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!ret)
+ goto skip_err_handling;
+ }
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ needs_reset = true;
+
+ /*
+ * if host reset is required then skip clearing the pending
+ * transfers forcefully because they will automatically get
+ * cleared after link startup.
+ */
+ if (needs_reset)
+ goto skip_pending_xfer_clear;
+
+ /* release lock as clear command might sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, tag)) {
+ err_xfer = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+ /* Clear pending task management requests */
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+ if (ufshcd_clear_tm_cmd(hba, tag)) {
+ err_tm = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+lock_skip_pending_xfer_clear:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
+
+ if (err_xfer || err_tm)
+ needs_reset = true;
+
+skip_pending_xfer_clear:
+ /* Fatal errors need reset */
+ if (needs_reset) {
+ unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+ /*
+ * ufshcd_reset_and_restore() does the link reinitialization
+ * which will need at least one empty doorbell slot to send the
+ * device management commands (NOP and query commands).
+ * If there is no slot empty at this moment then free up last
+ * slot forcefully.
+ */
+ if (hba->outstanding_reqs == max_doorbells)
+ __ufshcd_transfer_req_compl(hba,
+ (1UL << (hba->nutrs - 1)));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_reset_and_restore(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (err) {
+ dev_err(hba->dev, "%s: reset and restore failed\n",
+ __func__);
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ /*
+ * Inform scsi mid-layer that we did reset and allow to handle
+ * Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ }
+
+skip_err_handling:
+ if (!needs_reset) {
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->saved_err || hba->saved_uic_err)
+ dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ }
+
+ ufshcd_clear_eh_in_progress(hba);
+
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+ u32 reg)
+{
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ */
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+ }
+
+ /* PA_INIT_ERROR is fatal and needs UIC reset */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg)
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+
+ /* UIC NL/TL/DME errors needs software retry */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
+ hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ }
+
+ dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+ __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+ bool queue_eh_work = false;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ queue_eh_work = true;
+
+ if (hba->errors & UIC_ERROR) {
+ hba->uic_error = 0;
+ ufshcd_update_uic_error(hba);
+ if (hba->uic_error)
+ queue_eh_work = true;
+ }
+
+ if (queue_eh_work) {
+ /*
+ * update the transfer error masks to sticky bits, let's do this
+ * irrespective of current ufshcd_state.
+ */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
+ /* handle fatal errors only when link is functional */
+ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+ /* block commands from scsi mid-layer */
+ ufshcd_scsi_block_requests(hba);
+
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+
+ /* dump controller state before resetting */
+ if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ bool pr_prdt = !!(hba->saved_err &
+ SYSTEM_BUS_FATAL_ERROR);
+
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ __func__, hba->saved_err,
+ hba->saved_uic_err);
+
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs,
+ pr_prdt);
+ }
+ schedule_work(&hba->eh_work);
+ }
+ }
+ /*
+ * if (!queue_eh_work) -
+ * Other errors are either non-fatal where host recovers
+ * itself without s/w intervention or errors that will be
+ * handled by the SCSI core layer.
+ */
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up(&hba->tm_wq);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_check_errors(hba);
+
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status, enabled_intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
+
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utmrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for max. 1 sec to clear door bell register by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000, true);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot;
+ int err;
+ int task_tag;
+
+ host = hba->host;
+
+ /*
+ * Get a free slot; sleep if none is available.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ */
+ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
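+ /*
+ * Note: the UPIU task tag below is offset by the number of transfer
+ * request slots (nutrs), presumably so that task management tags do not
+ * collide with transfer request tags in the UPIU traces.
+ */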
+ task_tag = hba->nutrs + free_slot;
+ task_req_upiup->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lun_id, task_tag);
+ task_req_upiup->header.dword_1 =
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
+ /*
+ * The host shall provide the same value for the LUN field in the basic
+ * header and for Input Parameter 1.
+ */
+ task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+ task_req_upiup->input_param2 = cpu_to_be32(task_id);
+
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
+ ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
+ /* wait until the task management command is completed */
+ err = wait_event_timeout(hba->tm_wq,
+ test_bit(free_slot, &hba->tm_condition),
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
+ if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, free_slot))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, free_slot);
+ err = -ETIMEDOUT;
+ } else {
+ err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ }
+
+ clear_bit(free_slot, &hba->tm_condition);
+ ufshcd_put_tm_slot(hba, free_slot);
+ wake_up(&hba->tm_tag_wq);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ lrbp = &hba->lrb[tag];
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
+ goto out;
+ }
+
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[pos].lun == lrbp->lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+ }
+ }
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+out:
+ hba->req_abort_count = 0;
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+ return err;
+}
+
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+ lrbp->req_abort_skip = true;
+ }
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Abort the pending command in the device by sending the UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race where the controller is still sending the
+ * command to the device while the abort is issued. To avoid that, first issue
+ * UFS_QUERY_TASK to check if the command has really been issued and only then
+ * try to abort it.
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ u32 reg;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ /*
+ * Task abort to the device W-LUN is illegal. Since this command
+ * would fail due to the spec violation, the next SCSI error
+ * handling step would be to send a LU reset which, again, is a
+ * spec violation. To avoid these unnecessary/illegal steps we
+ * skip to the last error handling stage: reset and restore.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ return ufshcd_eh_host_reset_handler(cmd);
+
+ ufshcd_hold(hba, false);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* If command is already aborted/completed, return SUCCESS */
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
+ goto out;
+ }
+
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ }
+
+ /* Print Transfer Request of aborted task */
+ dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+
+ /*
+ * Print detailed info about aborted request.
+ * As more than one request might get aborted at the same time,
+ * print full information only for the first aborted request in order
+ * to reduce repeated printouts. For other aborted requests only print
+ * basic details.
+ */
+ scsi_print_command(hba->lrb[tag].cmd);
+ if (!hba->req_abort_count) {
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_trs(hba, 1 << tag, true);
+ } else {
+ ufshcd_print_trs(hba, 1 << tag, false);
+ }
+ hba->req_abort_count++;
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+ goto out;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err) {
+ err = resp; /* service response error */
+ dev_err(hba->dev, "%s: UFS_ABORT_TASK was issued but failed. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
+ goto out;
+ }
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_outstanding_req_clear(hba, tag);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
+
+out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ err = FAILED;
+ }
+
+ /*
+ * This ufshcd_release() corresponds to the original scsi cmd that got
+ * aborted here (as we won't get any IRQ for it).
+ */
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to the
+ * local and remote (device) UniPro stacks, and the attributes
+ * are reset to their default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+ int err;
+ unsigned long flags;
+
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_hba_stop(hba, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* scale up clocks to max frequency before full reinitialization */
+ ufshcd_scale_clks(hba, true);
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ goto out;
+
+ /* Establish the link again and restore the device */
+ err = ufshcd_probe_hba(hba);
+
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+ err = -EIO;
+out:
+ if (err)
+ dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+ return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover the device and host, and re-establish the link. This
+ * is helpful to recover communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+ int err = 0;
+ unsigned long flags;
+ int retries = MAX_HOST_RESET_RETRIES;
+
+ do {
+ err = ufshcd_host_reset_and_restore(hba);
+ } while (err && --retries);
+
+ /*
+ * After reset the door-bell might be cleared, complete
+ * outstanding requests in s/w here.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int err;
+ unsigned long flags;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ ufshcd_hold(hba, false);
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, don't assume
+ * anything from it. We are just avoiding a race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET ||
+ hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!err) {
+ err = SUCCESS;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ } else {
+ err = FAILED;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row of the descriptor table to start the scan from
+ * @buff: power descriptor buffer
+ *
+ * Returns calculated max ICC level for the specified regulator
+ */
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+ int i;
+ int curr_uA;
+ u16 data;
+ u16 unit;
+
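+ /*
+ * Worked example (hypothetical descriptor values): an entry that decodes
+ * to unit UFSHCD_MILI_AMP with value 100 is compared as 100 * 1000 =
+ * 100000 uA. The scan walks from start_scan down towards level 0 and
+ * stops at the first level the regulator can supply, i.e. the first
+ * level where sup_curr_uA >= curr_uA.
+ */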
+ for (i = start_scan; i >= 0; i--) {
+ data = be16_to_cpup((__be16 *)&buff[2 * i]);
+ unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+ ATTR_ICC_LVL_UNIT_OFFSET;
+ curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+ switch (unit) {
+ case UFSHCD_NANO_AMP:
+ curr_uA = curr_uA / 1000;
+ break;
+ case UFSHCD_MILI_AMP:
+ curr_uA = curr_uA * 1000;
+ break;
+ case UFSHCD_AMP:
+ curr_uA = curr_uA * 1000 * 1000;
+ break;
+ case UFSHCD_MICRO_AMP:
+ default:
+ break;
+ }
+ if (sup_curr_uA >= curr_uA)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+ }
+
+ return (u32)i;
+}
+
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
+ * ICC level. In case the regulators are not initialized we'll return 0.
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from.
+ * @len: length of desc_buf
+ *
+ * Returns calculated ICC level
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ u8 *desc_buf, int len)
+{
+ u32 icc_level = 0;
+
+ if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+ !hba->vreg_info.vccq2) {
+ dev_err(hba->dev,
+ "%s: Regulator capability was not set, actvIccLevel=%d",
+ __func__, icc_level);
+ goto out;
+ }
+
+ if (hba->vreg_info.vcc)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+ if (hba->vreg_info.vccq)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+ if (hba->vreg_info.vccq2)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+ return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+{
+ int ret;
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 *desc_buf;
+
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return;
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: Failed reading power descriptor.len = %d ret = %d",
+ __func__, buff_len, ret);
+ goto out;
+ }
+
+ hba->init_prefetch_data.icc_level =
+ ufshcd_find_max_sup_active_icc_level(hba,
+ desc_buf, buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+ __func__, hba->init_prefetch_data.icc_level);
+
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+ __func__, hba->init_prefetch_data.icc_level, ret);
+
+out:
+ kfree(desc_buf);
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ * "REPORT_LUNS" (address: 01h)
+ * "UFS Device" (address: 50h)
+ * "RPMB" (address: 44h)
+ * "BOOT" (address: 30h)
+ * The UFS device's power management needs to be controlled by the "POWER
+ * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
+ * condition" field will take effect only when it is sent to the "UFS device"
+ * well known logical unit, hence we require a scsi_device instance to
+ * represent this logical unit in order for the UFS host driver to send the
+ * SSU command for power management.
+ *
+ * We also require a scsi_device instance for the "RPMB" (Replay Protected
+ * Memory Block) LU so that a user space process can control this LU. User
+ * space may also want to have access to the BOOT LU.
+ *
+ * This function adds scsi device instances for each of these well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if it fails to add any of the required
+ * W-LUs).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct scsi_device *sdev_rpmb;
+ struct scsi_device *sdev_boot;
+
+ hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+ if (IS_ERR(hba->sdev_ufs_device)) {
+ ret = PTR_ERR(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
+ goto out;
+ }
+ scsi_device_put(hba->sdev_ufs_device);
+
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
+ goto remove_sdev_ufs_device;
+ }
+ scsi_device_put(sdev_rpmb);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot))
+ dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
+ else
+ scsi_device_put(sdev_boot);
+ goto out;
+
+remove_sdev_ufs_device:
+ scsi_remove_device(hba->sdev_ufs_device);
+out:
+ return ret;
+}
+
+static int ufs_get_device_desc(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ int err;
+ size_t buff_len;
+ u8 model_index;
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
+ QUERY_DESC_MAX_SIZE, true/*ASCII*/);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ struct ufs_dev_fix *f;
+
+ for (f = ufs_fixups; f->quirk; f++) {
+ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
+ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
+ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+
+/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * The PA_TActivate parameter can be tuned manually if the UniPro version is
+ * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
+ * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
+ * help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_min_activatetime);
+ if (ret)
+ goto out;
+
+ /* make sure proper unit conversion is applied */
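+ /*
+ * For example, if the capability were reported in 100us units and
+ * PA_TACTIVATE counted 10us steps, a peer value of 2 would be converted
+ * to (2 * 100) / 10 = 20 (hypothetical values; the actual units come
+ * from the RX_MIN_ACTIVATETIME_UNIT_US and PA_TACTIVATE_TIME_UNIT_US
+ * definitions).
+ */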
+ tuned_pa_tactivate =
+ ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+ / PA_TACTIVATE_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ tuned_pa_tactivate);
+
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * The PA_Hibern8Time parameter can be tuned manually if the UniPro version
+ * is less than 1.61. PA_Hibern8Time needs to be the maximum of the local
+ * M-PHY's TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's
+ * RX_HIBERN8TIME_CAPABILITY. This optimal value can help reduce the hibern8
+ * exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+ u32 max_hibern8_time, tuned_pa_hibern8time;
+
+ ret = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &local_tx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ max_hibern8_time = max(local_tx_hibern8_time_cap,
+ peer_rx_hibern8_time_cap);
+ /* make sure proper unit conversion is applied */
+ tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+ / PA_HIBERN8_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ tuned_pa_hibern8time);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
+ * enabled for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
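+ /*
+ * Worked example (hypothetical values): with host granularity 3 (8us per
+ * step per gran_to_us_table) and PA_TACTIVATE = 10, the host needs 80us;
+ * with peer granularity 2 (4us) and peer PA_TACTIVATE = 5, the peer
+ * advertises 20us. Since 80 > 20, the peer value is bumped below to
+ * 80 / 4 + 1 = 21.
+ */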
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+ if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ ufshcd_tune_pa_tactivate(hba);
+ ufshcd_tune_pa_hibern8time(hba);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+ /* set 1ms timeout for PA_TACTIVATE */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+ int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+ hba->ufs_stats.hibern8_exit_cnt = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+ memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+ hba->req_abort_count = 0;
+}
+
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+ &hba->desc_size.hlth_desc);
+ if (err)
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
+{
+ struct ufs_dev_desc card = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ret = ufshcd_link_startup(hba);
+ if (ret)
+ goto out;
+
+ /* set the default level for urgent bkops */
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+ hba->is_urgent_bkops_lvl_checked = false;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba, &card);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
+ (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+ if (ret)
+ goto out;
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+ hba->wlun_dev_clr_ua = true;
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+ */
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ bool flag;
+
+ /* clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ if (!hba->is_init_prefetch)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ if (ufshcd_scsi_add_wlus(hba))
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ }
+
+ if (!hba->is_init_prefetch)
+ hba->is_init_prefetch = true;
+
+out:
+ /*
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
+ */
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_hba_exit(hba);
+ }
+
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)data;
+
+ ufshcd_probe_hba(hba);
+}
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int index;
+ bool found = false;
+
+ if (!scmd || !scmd->device || !scmd->device->host)
+ return BLK_EH_DONE;
+
+ host = scmd->device->host;
+ hba = shost_priv(host);
+ if (!hba)
+ return BLK_EH_DONE;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[index].cmd == scmd) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /*
+ * Bypass SCSI error handling and reset the block layer timer if this
+ * SCSI command was not actually dispatched to UFS driver, otherwise
+ * let SCSI layer handle the error as usual.
+ */
+ return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+}
+
+static const struct attribute_group *ufshcd_driver_groups[] = {
+ &ufs_sysfs_unit_descriptor_group,
+ &ufs_sysfs_lun_attributes_group,
+ NULL,
+};
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_configure = ufshcd_slave_configure,
+ .slave_destroy = ufshcd_slave_destroy,
+ .change_queue_depth = ufshcd_change_queue_depth,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+ .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .eh_timed_out = ufshcd_eh_timed_out,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+ .max_host_blocked = 1,
+ .track_queue_depth = 1,
+ .sdev_groups = ufshcd_driver_groups,
+ .no_write_same = 1,
+};
+
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ int ua)
+{
+ int ret;
+
+ if (!vreg)
+ return 0;
+
+ ret = regulator_set_load(vreg->reg, ua);
+ if (ret < 0) {
+ dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
+ __func__, vreg->name, ua, ret);
+ }
+
+ return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg,
+ UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg;
+ const char *name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
+ reg = vreg->reg;
+ name = vreg->name;
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = ufshcd_config_vreg(dev, vreg, true);
+ if (!ret)
+ ret = regulator_enable(vreg->reg);
+
+ if (!ret)
+ vreg->enabled = true;
+ else
+ dev_err(dev, "%s: %s enable failed, err=%d\n",
+ __func__, vreg->name, ret);
+out:
+ return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (!vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufshcd_config_vreg(dev, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+ if (ret)
+ goto out;
+
+out:
+ if (ret) {
+ ufshcd_toggle_vreg(dev, info->vccq2, false);
+ ufshcd_toggle_vreg(dev, info->vccq, false);
+ ufshcd_toggle_vreg(dev, info->vcc, false);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+ return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vcc);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+ return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+ return 0;
+}
+
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+ int ret = 0;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+ else if (!info->vccq)
+ goto out;
+
+ if (unused) {
+ /* shut off the rail here */
+ ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+ /*
+ * Mark this rail as no longer used, so it doesn't get enabled
+ * later by mistake
+ */
+ if (!ret)
+ info->vccq->unused = true;
+ } else {
+ /*
+ * rail should have been already enabled hence just make sure
+ * that unused flag is cleared.
+ */
+ info->vccq->unused = false;
+ }
+out:
+ return ret;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long flags;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ continue;
+
+ clk_state_changed = on ^ clki->enabled;
+ if (on && !clki->enabled) {
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+ } else if (!on && clki->enabled) {
+ clk_disable_unprepare(clki->clk);
+ }
+ clki->enabled = on;
+ dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+ clki->name, on ? "en" : "dis");
+ }
+ }
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
+
+out:
+ if (ret) {
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ clk_disable_unprepare(clki->clk);
+ }
+ } else if (!ret && on) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ (on ? "on" : "off"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ return __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct device *dev = hba->dev;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ }
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->vops)
+ goto out;
+
+ err = ufshcd_vops_init(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_vops_setup_regulators(hba, true);
+ if (err)
+ goto out_exit;
+
+ goto out;
+
+out_exit:
+ ufshcd_vops_exit(hba);
+out:
+ if (err)
+ dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+ __func__, ufshcd_get_var_name(hba), err);
+ return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+ if (!hba->vops)
+ return;
+
+ ufshcd_vops_setup_regulators(hba, false);
+
+ ufshcd_vops_exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+ int err;
+
+ /*
+ * Handle host controller power separately from the UFS device power
+ * rails, as this makes it easier to control host controller power
+ * collapse, which is different from UFS device power collapse.
+ * Also, enable the host controller power before we go ahead with the
+ * rest of the initialization here.
+ */
+ err = ufshcd_init_hba_vreg(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_setup_hba_vreg(hba, true);
+ if (err)
+ goto out;
+
+ err = ufshcd_init_clocks(hba);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_setup_clocks(hba, true);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_init_vreg(hba);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_setup_vreg(hba, true);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_variant_hba_init(hba);
+ if (err)
+ goto out_disable_vreg;
+
+ hba->is_powered = true;
+ goto out;
+
+out_disable_vreg:
+ ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+ ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+ ufshcd_setup_hba_vreg(hba, false);
+out:
+ return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+ if (hba->is_powered) {
+ ufshcd_variant_hba_exit(hba);
+ ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ }
+ ufshcd_setup_clocks(hba, false);
+ ufshcd_setup_hba_vreg(hba, false);
+ hba->is_powered = false;
+ }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
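+	/*
+	 * REQUEST SENSE CDB layout: byte 0 is the opcode and byte 4 is the
+	 * allocation length (UFSHCD_REQ_SENSE_SIZE here); the remaining
+	 * bytes are left as zero.
+	 */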
+ unsigned char cmd[6] = {REQUEST_SENSE,
+ 0,
+ 0,
+ 0,
+ UFSHCD_REQ_SENSE_SIZE,
+ 0};
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+ msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
+ if (ret)
+ pr_err("%s: failed with err %d\n", __func__, ret);
+
+ kfree(buffer);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ * power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode)
+{
+ unsigned char cmd[6] = { START_STOP };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ sdp = hba->sdev_ufs_device;
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If scsi commands fail, the scsi mid-layer schedules scsi error-
+ * handling, which would wait for the host to be resumed. Since we know
+ * we are functional while we are here, skip host resume in error
+ * handling context.
+ */
+ hba->host->eh_noresume = 1;
+ if (hba->wlun_dev_clr_ua) {
+ ret = ufshcd_send_request_sense(hba, sdp);
+ if (ret)
+ goto out;
+ /* Unit attention condition is cleared now */
+ hba->wlun_dev_clr_ua = false;
+ }
+
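+ /*
+ * The POWER CONDITION field occupies bits 7:4 of byte 4 of the
+ * START STOP UNIT CDB, hence the shift below; e.g. a pwr_mode of 2
+ * would be encoded as 0x20.
+ */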
+ cmd[4] = pwr_mode << 4;
+
+ /*
+ * This function is generally called from the power management
+ * callbacks, hence set the RQF_PM flag so that it doesn't resume the
+ * already suspended children.
+ */
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d, result %x\n",
+ pwr_mode, ret);
+ if (driver_byte(ret) & DRIVER_SENSE)
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+out:
+ scsi_device_put(sdp);
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ ufshcd_set_link_hibern8(hba);
+ else
+ goto out;
+ }
+ /*
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ /*
+ * Let's make sure that the link is in low power mode; we currently
+ * do this by putting the link in Hibern8. The other way to put the
+ * link in low power mode is to send a DME end point reset to the
+ * device and then the DME reset command to the local UniPro, but
+ * putting the link in Hibern8 is much faster.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ goto out;
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba, true);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ /*
+ * It seems some UFS devices may keep drawing more than the sleep current
+ * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
+ * To avoid this situation, add a 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba) &&
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+ usleep_range(2000, 2100);
+
+ /*
+ * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some
+ * power.
+ *
+ * If the UFS device and link are in the OFF state, all power supplies
+ * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is not
+ * required. If the UFS link is inactive (Hibern8 or OFF state) and the
+ * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as the device is
+ * anyway in a low power state, which would save some power.
+ */
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ufshcd_setup_vreg(hba, false);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (!ufshcd_is_link_active(hba)) {
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ }
+ }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ret = ufshcd_setup_vreg(hba, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ if (!ret && !ufshcd_is_link_active(hba)) {
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vccq_lpm;
+ }
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ }
+ goto out;
+
+vccq_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+ return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, true);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both the UFS device and the UFS link are powered off.
+ *
+ * NOTE: UFS device & link must be active before we enter this function.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret = 0;
+ enum ufs_pm_level pm_lvl;
+ enum ufs_dev_pwr_mode req_dev_pwr_mode;
+ enum uic_link_state req_link_state;
+
+ hba->pm_op_in_progress = 1;
+ if (!ufshcd_is_shutdown_pm(pm_op)) {
+ pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->rpm_lvl : hba->spm_lvl;
+ req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+ req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+ } else {
+ req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+ req_link_state = UIC_LINK_OFF_STATE;
+ }
+
+ /*
+ * If we can't transition into any of the low power modes
+ * just gate the clocks.
+ */
+ ufshcd_hold(hba, false);
+ hba->clk_gating.is_suspended = true;
+
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
+
+ if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ req_link_state == UIC_LINK_ACTIVE_STATE) {
+ goto disable_clks;
+ }
+
+ if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+ (req_link_state == hba->uic_link_state))
+ goto enable_gating;
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+ ret = -EINVAL;
+ goto enable_gating;
+ }
+
+ if (ufshcd_is_runtime_pm(pm_op)) {
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations if bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
+ }
+
+ if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+ ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op))) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
+
+ ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ if (ret)
+ goto set_dev_active;
+
+ ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+ /*
+ * Call vendor specific suspend callback. As these callbacks may access
+ * vendor specific host controller register space, call them before the
+ * host clocks are turned off.
+ */
+ ret = ufshcd_vops_suspend(hba, pm_op);
+ if (ret)
+ goto set_link_active;
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transactions expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ goto out;
+
+set_link_active:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ ufshcd_vreg_set_hpm(hba);
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ ufshcd_set_link_active(hba);
+ else if (ufshcd_is_link_off(hba))
+ ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+ if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ ufshcd_disable_auto_bkops(hba);
+enable_gating:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret;
+ enum uic_link_state old_link_state;
+
+ hba->pm_op_in_progress = 1;
+ old_link_state = hba->uic_link_state;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto out;
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ /*
+ * Call vendor specific resume callback. As these callbacks may access
+ * vendor specific host controller register space call them when the
+ * host clocks are ON.
+ */
+ ret = ufshcd_vops_resume(hba, pm_op);
+ if (ret)
+ goto disable_vreg;
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (!ret)
+ ufshcd_set_link_active(hba);
+ else
+ goto vendor_suspend;
+ } else if (ufshcd_is_link_off(hba)) {
+ ret = ufshcd_host_reset_and_restore(hba);
+ /*
+ * ufshcd_host_reset_and_restore() should have already
+ * set the link state as active
+ */
+ if (ret || !ufshcd_is_link_active(hba))
+ goto vendor_suspend;
+ }
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ if (ret)
+ goto set_old_link_state;
+ }
+
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+ ufshcd_enable_auto_bkops(hba);
+ else
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+
+ hba->clk_gating.is_suspended = false;
+
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ goto out;
+
+set_old_link_state:
+ ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+ ufshcd_vops_suspend(hba, pm_op);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+ ufshcd_disable_irq(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_setup_clocks(hba, false);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba || !hba->is_powered)
+ return 0;
+
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ /*
+ * The UFS device and/or UFS link low power states during runtime
+ * suspend seem to be different from what is expected during
+ * system suspend. Hence runtime resume the device & link and
+ * let the system suspend low power states take effect.
+ * TODO: If resume takes a long time, we might optimize it in
+ * the future by not resuming everything if possible.
+ */
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = true;
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
+ /*
+ * Let the runtime resume take care of resuming
+ * if runtime suspended.
+ */
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_resume);
+
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of the ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function brings the UFS device, UniPro link and controller to the
+ * active state. The following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ * to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So the following would be the possible power state after this function
+ * returns successfully:
+ * S1: UFS device in Active state with VCC rail ON
+ * UniPro link in Active state
+ * All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
+/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function powers off both the UFS device and the UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/**
+ * ufshcd_remove - de-allocate the SCSI host and the host memory space
+ * data structures
+ * @hba: per adapter instance
+ */
+void ufshcd_remove(struct ufs_hba *hba)
+{
+ ufs_sysfs_remove_nodes(hba->dev);
+ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba, true);
+
+ ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_hba_exit(hba);
+}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
+
+/**
+ * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+ * @hba: pointer to Host Bus Adapter (HBA)
+ */
+void ufshcd_dealloc_host(struct ufs_hba *hba)
+{
+ scsi_host_put(hba->host);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ return 0;
+ }
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
+/**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err = 0;
+
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_error;
+ }
+ hba = shost_priv(host);
+ hba->host = host;
+ hba->dev = dev;
+ *hba_handle = hba;
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+ int err;
+ struct Scsi_Host *host = hba->host;
+ struct device *dev = hba->dev;
+
+ if (!mmio_base) {
+ dev_err(hba->dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
+ /* Get Interrupt bit mask per version */
+ hba->intr_mask = ufshcd_get_intr_mask(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(hba->dev, "set dma mask failed\n");
+ goto out_disable;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFS_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+ hba->max_pwr_info.is_valid = false;
+
+ /* Initialize wait queue for task management */
+ init_waitqueue_head(&hba->tm_wq);
+ init_waitqueue_head(&hba->tm_tag_wq);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+
+ /* Initialize UIC command mutex */
+ mutex_init(&hba->uic_cmd_mutex);
+
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ init_rwsem(&hba->clk_scaling_lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
+ ufshcd_init_clk_gating(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+ /*
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+ mb();
+
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(hba->dev, "request irq failed\n");
+ goto exit_gating;
+ } else {
+ hba->is_irq_enabled = true;
+ }
+
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto exit_gating;
+ }
+
+ /* Host controller enable */
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ goto out_remove_scsi_host;
+ }
+
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+ }
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+ /* Set the default auto-hibernate idle timer value to 150 ms */
+ if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+ }
+
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
+ /*
+ * We are assuming that the device wasn't put in sleep/power-down
+ * state exclusively during the boot stage before the kernel starts.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
+ */
+ ufshcd_set_ufs_dev_active(hba);
+
+ async_schedule(ufshcd_async_scan, hba);
+ ufs_sysfs_add_nodes(hba->dev);
+
+ return 0;
+
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
+exit_gating:
+ ufshcd_exit_clk_gating(hba);
+out_disable:
+ hba->is_irq_enabled = false;
+ ufshcd_hba_exit(hba);
+out_error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_init);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
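For reference, the exported helpers above (ufshcd_alloc_host(), ufshcd_init(), ufshcd_remove() and the system/runtime PM entry points) are meant to be driven by a small bus or platform glue driver. The sketch below shows one plausible wiring; the "acme" names, the probe-flow details and the "ufshcd.h" include path are illustrative assumptions and not part of this patch.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "ufshcd.h"

static int acme_ufs_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ufs_hba *hba;
        struct resource *res;
        void __iomem *mmio;
        int irq, err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR(mmio))
                return PTR_ERR(mmio);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = ufshcd_alloc_host(dev, &hba);     /* allocates the SCSI host */
        if (err)
                return err;

        err = ufshcd_init(hba, mmio, irq);      /* brings the controller up */
        if (err) {
                ufshcd_dealloc_host(hba);
                return err;
        }

        platform_set_drvdata(pdev, hba);
        return 0;
}

static int acme_ufs_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        ufshcd_dealloc_host(hba);
        return 0;
}

/* dev_pm_ops wrappers routed to the helpers exported by this file. */
static int acme_ufs_suspend(struct device *dev)
{
        return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int acme_ufs_resume(struct device *dev)
{
        return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int acme_ufs_runtime_suspend(struct device *dev)
{
        return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int acme_ufs_runtime_resume(struct device *dev)
{
        return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops acme_ufs_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(acme_ufs_suspend, acme_ufs_resume)
        SET_RUNTIME_PM_OPS(acme_ufs_runtime_suspend,
                           acme_ufs_runtime_resume, NULL)
};

A real glue driver would additionally register a struct platform_driver pointing at these callbacks and its own match table, as the existing ufshcd-pltfrm/ufs-qcom code does.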
diff --git a/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/postimage b/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/postimage
new file mode 100644
index 0000000..db1bab3
--- /dev/null
+++ b/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/postimage
@@ -0,0 +1,736 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=y
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/preimage b/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/preimage
new file mode 100644
index 0000000..bf27de5
--- /dev/null
+++ b/rr-cache/0403d426b570cfb211d5c6743c695f2ab374c7e4/preimage
@@ -0,0 +1,777 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+<<<<<<<
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+=======
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+>>>>>>>
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+<<<<<<<
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+=======
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+>>>>>>>
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+<<<<<<<
+=======
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+>>>>>>>
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+<<<<<<<
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+=======
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+>>>>>>>
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+<<<<<<<
+CONFIG_ATL1C=m
+=======
+CONFIG_ATL1C=y
+>>>>>>>
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+<<<<<<<
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+=======
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+>>>>>>>
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+<<<<<<<
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+=======
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_SND_SIMPLE_CARD=y
+>>>>>>>
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+<<<<<<<
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+=======
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+>>>>>>>
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/postimage b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/postimage
new file mode 100644
index 0000000..35514e6
--- /dev/null
+++ b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/postimage
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _Q6_PCM_ROUTING_H
+#define _Q6_PCM_ROUTING_H
+
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type);
+void q6routing_stream_close(int fedai_id, int stream_type);
+
+#endif /*_Q6_PCM_ROUTING_H */
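As a rough usage sketch (not taken from this patch): a front-end PCM open path registers the stream with the routing driver and the close path tears it down. Only the q6routing_stream_open()/q6routing_stream_close() prototypes come from the header above; the helper names, IDs, perf-mode value and include path below are placeholders.

#include <sound/pcm.h>
#include "q6routing.h"

/* Hypothetical front-end open/close helpers; IDs are illustrative. */
static int example_fe_open(int fedai_id, int session_id)
{
        return q6routing_stream_open(fedai_id, 0 /* perf mode */,
                                     session_id, SNDRV_PCM_STREAM_PLAYBACK);
}

static void example_fe_close(int fedai_id)
{
        q6routing_stream_close(fedai_id, SNDRV_PCM_STREAM_PLAYBACK);
}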
diff --git a/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/preimage b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/preimage
new file mode 100644
index 0000000..8001f18
--- /dev/null
+++ b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/preimage
@@ -0,0 +1,13 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef _Q6_PCM_ROUTING_H
+#define _Q6_PCM_ROUTING_H
+
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type);
+void q6routing_stream_close(int fedai_id, int stream_type);
+
+#endif /*_Q6_PCM_ROUTING_H */
diff --git a/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/thisimage b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/thisimage
new file mode 100644
index 0000000..8001f18
--- /dev/null
+++ b/rr-cache/09b4becfe0c54fb49c1464dea98b9b48d956217b/thisimage
@@ -0,0 +1,13 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef _Q6_PCM_ROUTING_H
+#define _Q6_PCM_ROUTING_H
+
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type);
+void q6routing_stream_close(int fedai_id, int stream_type);
+
+#endif /*_Q6_PCM_ROUTING_H */
diff --git a/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/postimage b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/postimage
new file mode 100644
index 0000000..01094d1
--- /dev/null
+++ b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/postimage
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __Q6DSP_COMMON_H__
+#define __Q6DSP_COMMON_H__
+
+#include <linux/kernel.h>
+
+#define PCM_MAX_NUM_CHANNEL 8
+#define PCM_CHANNEL_NULL 0
+
+#define PCM_CHANNEL_FL 1 /* Front left channel. */
+#define PCM_CHANNEL_FR 2 /* Front right channel. */
+#define PCM_CHANNEL_FC 3 /* Front center channel. */
+#define PCM_CHANNEL_LS 4 /* Left surround channel. */
+#define PCM_CHANNEL_RS 5 /* Right surround channel. */
+#define PCM_CHANNEL_LFE 6 /* Low frequency effect channel. */
+#define PCM_CHANNEL_CS 7 /* Center surround channel; Rear center ch */
+#define PCM_CHANNEL_LB 8 /* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_RB 9 /* Right back channel; Rear right channel. */
+#define PCM_CHANNELS 10 /* Top surround channel. */
+
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch);
+
+#endif /* __Q6DSP_COMMON_H__ */
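A minimal caller sketch for the helper declared above, assuming a stereo stream; the include path, function name and error handling are assumptions, and the exact map contents are up to the q6dsp_map_channels() implementation.

#include <linux/errno.h>
#include <linux/types.h>
#include "q6dsp-common.h"

static int example_fill_channel_map(void)
{
        u8 ch_map[PCM_MAX_NUM_CHANNEL] = { 0 };

        /* Ask the helper to fill a channel map for a 2-channel stream. */
        if (q6dsp_map_channels(ch_map, 2))
                return -EINVAL;

        return 0;
}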
diff --git a/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/preimage b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/preimage
new file mode 100644
index 0000000..a2f93fb
--- /dev/null
+++ b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/preimage
@@ -0,0 +1,36 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6DSP_COMMON_H__
+#define __Q6DSP_COMMON_H__
+
+#include <linux/kernel.h>
+
+<<<<<<<
+#define PCM_FORMAT_MAX_NUM_CHANNEL 8
+=======
+#define PCM_MAX_NUM_CHANNEL 8
+>>>>>>>
+#define PCM_CHANNEL_NULL 0
+
+#define PCM_CHANNEL_FL 1 /* Front left channel. */
+#define PCM_CHANNEL_FR 2 /* Front right channel. */
+#define PCM_CHANNEL_FC 3 /* Front center channel. */
+#define PCM_CHANNEL_LS 4 /* Left surround channel. */
+#define PCM_CHANNEL_RS 5 /* Right surround channel. */
+#define PCM_CHANNEL_LFE 6 /* Low frequency effect channel. */
+#define PCM_CHANNEL_CS 7 /* Center surround channel; Rear center ch */
+#define PCM_CHANNEL_LB 8 /* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_RB 9 /* Right back channel; Rear right channel. */
+#define PCM_CHANNELS 10 /* Top surround channel. */
+
+<<<<<<<
+int q6dsp_map_channels(u8 ch_map[PCM_FORMAT_MAX_NUM_CHANNEL], int ch);
+=======
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch);
+>>>>>>>
+
+#endif /* __Q6DSP_COMMON_H__ */
diff --git a/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/thisimage b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/thisimage
new file mode 100644
index 0000000..a2f93fb
--- /dev/null
+++ b/rr-cache/0ac478e0d1fd3f2fc2e4604b3d28397dabba37a9/thisimage
@@ -0,0 +1,36 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6DSP_COMMON_H__
+#define __Q6DSP_COMMON_H__
+
+#include <linux/kernel.h>
+
+<<<<<<<
+#define PCM_FORMAT_MAX_NUM_CHANNEL 8
+=======
+#define PCM_MAX_NUM_CHANNEL 8
+>>>>>>>
+#define PCM_CHANNEL_NULL 0
+
+#define PCM_CHANNEL_FL 1 /* Front left channel. */
+#define PCM_CHANNEL_FR 2 /* Front right channel. */
+#define PCM_CHANNEL_FC 3 /* Front center channel. */
+#define PCM_CHANNEL_LS 4 /* Left surround channel. */
+#define PCM_CHANNEL_RS 5 /* Right surround channel. */
+#define PCM_CHANNEL_LFE 6 /* Low frequency effect channel. */
+#define PCM_CHANNEL_CS 7 /* Center surround channel; Rear center ch */
+#define PCM_CHANNEL_LB 8 /* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_RB 9 /* Right back channel; Rear right channel. */
+#define PCM_CHANNELS 10 /* Top surround channel. */
+
+<<<<<<<
+int q6dsp_map_channels(u8 ch_map[PCM_FORMAT_MAX_NUM_CHANNEL], int ch);
+=======
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch);
+>>>>>>>
+
+#endif /* __Q6DSP_COMMON_H__ */
diff --git a/rr-cache/0f0d636541673b35ab6e901db0d60610f0cf1c49/preimage b/rr-cache/0f0d636541673b35ab6e901db0d60610f0cf1c49/preimage
new file mode 100644
index 0000000..3e04e50
--- /dev/null
+++ b/rr-cache/0f0d636541673b35ab6e901db0d60610f0cf1c49/preimage
@@ -0,0 +1,2857 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
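+/*
+ * F() builds an RCG frequency table entry; the third field stores the
+ * hardware pre-divider encoding (2 * h - 1), so for example
+ * F(19200000, P_XO, 1, 0, 0) expands to { 19200000, P_XO, 1, 0, 0 }.
+ */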
+
+enum {
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_PLL0_EARLY_DIV_CLK_SRC,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
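+/*
+ * Each parent_map entry pairs one of the P_* sources above with the mux
+ * select value programmed into the RCG CFG register; the matching
+ * gcc_parent_names_* array must list the parent clock names in the same
+ * order.
+ */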
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_pi_sleep_clk",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "xo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll4_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "xo",
+ "gpll0_out_main",
+ "aud_ref_clk",
+ "core_bi_pll_test_se",
+};
+
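+/*
+ * Fabia PLL VCO ranges: each entry is { min_freq, max_freq, val }, where
+ * val selects the VCO configuration used for rates in that range.
+ */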
+static struct pll_vco fabia_vco[] = {
+ { 250000000, 2000000000, 0 },
+ { 125000000, 1000000000, 1 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_test = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_test",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_even = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_even",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_main = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_odd = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_odd",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_test = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_test",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_even = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_even",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_main",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_odd = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_odd",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_test = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_test",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_odd = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_odd",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_test = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_test",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_odd = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_odd",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_test = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_test",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x19020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1b00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1d020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1f020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x21020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x23020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 96, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 192, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 384, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1a00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x26020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2800c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2a00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2c020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2e020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x30020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x2b00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_ahb_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(1010526, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_aux_clk_src = {
+ .cmd_rcgr = 0x6c000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x16010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F(105495, P_XO, 1, 1, 182),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x75018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x5000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre1_noc_xo_clk = {
+ .halt_reg = 0x8202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_noc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre1_ufs_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre1_usb3_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_qdss_tsctr_div2_clk = {
+ .halt_reg = 0x48090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_qdss_tsctr_div2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_qdss_tsctr_div8_clk = {
+ .halt_reg = 0x48094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_qdss_tsctr_div8_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_hmss_axi_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_hmss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
+ .halt_reg = 0x4401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_mss_q6_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x25004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x2a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x2a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x2c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x2c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x30008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_sleep_clk = {
+ .halt_reg = 0x25008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x29004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x2b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
+ .halt_reg = 0x5018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_src_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_src_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_ahb_clk",
+ .parent_names = (const char *[]){
+ "hmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_at_clk = {
+ .halt_reg = 0x48010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_hmss_dvm_bus_clk = {
+ .halt_reg = 0x4808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_dvm_bus_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_hmss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "hmss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_trig_clk = {
+ .halt_reg = 0x4800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_lpass_at_clk = {
+ .halt_reg = 0x47020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_lpass_trig_clk = {
+ .halt_reg = 0x4701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_qm_ahb_clk = {
+ .halt_reg = 0x9030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_qm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_qm_core_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_qm_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_sys_noc_axi_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_sys_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_at_clk = {
+ .halt_reg = 0x8a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "pcie_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "pcie_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_aux_clk = {
+ .halt_reg = 0x76040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .halt_reg = 0x75014,
+<<<<<<<
+ .halt_check = BRANCH_HALT,
+=======
+ .halt_check = BRANCH_HALT_SKIP,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .halt_reg = 0x7605c,
+<<<<<<<
+ .halt_check = BRANCH_HALT,
+=======
+ .halt_check = BRANCH_HALT_SKIP,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x7605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .halt_reg = 0x75010,
+<<<<<<<
+ .halt_check = BRANCH_HALT,
+=======
+ .halt_check = BRANCH_HALT_SKIP,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x50000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "ufs_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb_30_gdsc = {
+ .gdscr = 0xf004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "usb_30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_msm8998_clocks[] = {
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [GCC_AGGRE1_NOC_XO_CLK] = &gcc_aggre1_noc_xo_clk.clkr,
+ [GCC_AGGRE1_UFS_AXI_CLK] = &gcc_aggre1_ufs_axi_clk.clkr,
+ [GCC_AGGRE1_USB3_AXI_CLK] = &gcc_aggre1_usb3_axi_clk.clkr,
+ [GCC_APSS_QDSS_TSCTR_DIV2_CLK] = &gcc_apss_qdss_tsctr_div2_clk.clkr,
+ [GCC_APSS_QDSS_TSCTR_DIV8_CLK] = &gcc_apss_qdss_tsctr_div8_clk.clkr,
+ [GCC_BIMC_HMSS_AXI_CLK] = &gcc_bimc_hmss_axi_clk.clkr,
+ [GCC_BIMC_MSS_Q6_AXI_CLK] = &gcc_bimc_mss_q6_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_SLEEP_CLK] = &gcc_blsp2_sleep_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_CFG_NOC_USB3_AXI_CLK] = &gcc_cfg_noc_usb3_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
+ [GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
+ [GCC_HMSS_AT_CLK] = &gcc_hmss_at_clk.clkr,
+<<<<<<<
+ [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_HMSS_TRIG_CLK] = &gcc_hmss_trig_clk.clkr,
+ [GCC_LPASS_AT_CLK] = &gcc_lpass_at_clk.clkr,
+=======
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_HMSS_TRIG_CLK] = &gcc_hmss_trig_clk.clkr,
+>>>>>>>
+ [GCC_LPASS_TRIG_CLK] = &gcc_lpass_trig_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
+ [GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
+ [GCC_MMSS_SYS_NOC_AXI_CLK] = &gcc_mmss_sys_noc_axi_clk.clkr,
+ [GCC_MSS_AT_CLK] = &gcc_mss_at_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr,
+ [GCC_UFS_PHY_AUX_CLK] = &gcc_ufs_phy_aux_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+ [GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+ [GPLL0_OUT_TEST] = &gpll0_out_test.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_OUT_EVEN] = &gpll1_out_even.clkr,
+ [GPLL1_OUT_MAIN] = &gpll1_out_main.clkr,
+ [GPLL1_OUT_ODD] = &gpll1_out_odd.clkr,
+ [GPLL1_OUT_TEST] = &gpll1_out_test.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_OUT_EVEN] = &gpll2_out_even.clkr,
+ [GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
+ [GPLL2_OUT_ODD] = &gpll2_out_odd.clkr,
+ [GPLL2_OUT_TEST] = &gpll2_out_test.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_EVEN] = &gpll3_out_even.clkr,
+ [GPLL3_OUT_MAIN] = &gpll3_out_main.clkr,
+ [GPLL3_OUT_ODD] = &gpll3_out_odd.clkr,
+ [GPLL3_OUT_TEST] = &gpll3_out_test.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+ [GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+ [GPLL4_OUT_ODD] = &gpll4_out_odd.clkr,
+ [GPLL4_OUT_TEST] = &gpll4_out_test.clkr,
+ [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
+ [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
+ [PCIE_AUX_CLK_SRC] = &pcie_aux_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+};
+
+static struct gdsc *gcc_msm8998_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+ [USB_30_GDSC] = &usb_30_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8998_resets[] = {
+	[GCC_BLSP1_QUP1_BCR] = { 0x19000 },
+	[GCC_BLSP1_QUP2_BCR] = { 0x1b000 },
+	[GCC_BLSP1_QUP3_BCR] = { 0x1d000 },
+	[GCC_BLSP1_QUP4_BCR] = { 0x1f000 },
+	[GCC_BLSP1_QUP5_BCR] = { 0x21000 },
+	[GCC_BLSP1_QUP6_BCR] = { 0x23000 },
+	[GCC_BLSP2_QUP1_BCR] = { 0x26000 },
+	[GCC_BLSP2_QUP2_BCR] = { 0x28000 },
+	[GCC_BLSP2_QUP3_BCR] = { 0x2a000 },
+	[GCC_BLSP2_QUP4_BCR] = { 0x2c000 },
+	[GCC_BLSP2_QUP5_BCR] = { 0x2e000 },
+	[GCC_BLSP2_QUP6_BCR] = { 0x30000 },
+	[GCC_PCIE_0_BCR] = { 0x6b000 },
+	[GCC_PDM_BCR] = { 0x33000 },
+	[GCC_SDCC2_BCR] = { 0x14000 },
+	[GCC_SDCC4_BCR] = { 0x16000 },
+	[GCC_TSIF_BCR] = { 0x36000 },
+	[GCC_UFS_BCR] = { 0x75000 },
+	[GCC_USB_30_BCR] = { 0xf000 },
+};
+
+static const struct regmap_config gcc_msm8998_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8f000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8998_desc = {
+ .config = &gcc_msm8998_regmap_config,
+ .clks = gcc_msm8998_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8998_clocks),
+ .resets = gcc_msm8998_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8998_resets),
+ .gdscs = gcc_msm8998_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
+};
+
+static int gcc_msm8998_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8998_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain apps low power modes.
+ */
+ ret = regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
+}
+
+static const struct of_device_id gcc_msm8998_match_table[] = {
+ { .compatible = "qcom,gcc-msm8998" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8998_match_table);
+
+static struct platform_driver gcc_msm8998_driver = {
+ .probe = gcc_msm8998_probe,
+ .driver = {
+ .name = "gcc-msm8998",
+ .of_match_table = gcc_msm8998_match_table,
+ },
+};
+
+static int __init gcc_msm8998_init(void)
+{
+ return platform_driver_register(&gcc_msm8998_driver);
+}
+core_initcall(gcc_msm8998_init);
+
+static void __exit gcc_msm8998_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8998_driver);
+}
+module_exit(gcc_msm8998_exit);
+
+MODULE_DESCRIPTION("QCOM GCC msm8998 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8998");
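The controller above exports its clocks, resets and GDSCs to peripheral drivers through the common clock, reset and genpd frameworks. As a point of reference, here is a minimal consumer-side sketch; the compatible string, device name and the "iface" clock-name are invented for illustration and are not part of gcc-msm8998 or its binding, while the clk and platform-device calls are the standard kernel APIs. A real consumer node would reference the controller in DT, e.g. clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>; clock-names = "iface";.

/*
 * Illustrative sketch only: a hypothetical platform driver consuming one
 * branch clock registered by gcc-msm8998 via the common clock framework.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_clk_consumer_probe(struct platform_device *pdev)
{
	struct clk *iface;
	int ret;

	/* Looks up the clock wired to this node in DT under the name "iface". */
	iface = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(iface))
		return PTR_ERR(iface);

	/* Ungate the branch; the gcc driver manages the parent RCG underneath. */
	ret = clk_prepare_enable(iface);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, iface);
	return 0;
}

static int demo_clk_consumer_remove(struct platform_device *pdev)
{
	clk_disable_unprepare(platform_get_drvdata(pdev));
	return 0;
}

static const struct of_device_id demo_clk_consumer_match[] = {
	{ .compatible = "demo,clk-consumer" },	/* hypothetical binding */
	{ }
};

static struct platform_driver demo_clk_consumer_driver = {
	.probe = demo_clk_consumer_probe,
	.remove = demo_clk_consumer_remove,
	.driver = {
		.name = "demo-clk-consumer",
		.of_match_table = demo_clk_consumer_match,
	},
};
module_platform_driver(demo_clk_consumer_driver);

MODULE_DESCRIPTION("Illustrative consumer of a gcc-msm8998 branch clock");
MODULE_LICENSE("GPL v2");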
diff --git a/rr-cache/0f0ead2f874d2ed7910c132b40bbe82ac6705cc9/preimage b/rr-cache/0f0ead2f874d2ed7910c132b40bbe82ac6705cc9/preimage
new file mode 100644
index 0000000..529e293
--- /dev/null
+++ b/rr-cache/0f0ead2f874d2ed7910c132b40bbe82ac6705cc9/preimage
@@ -0,0 +1,268 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
+config QCOM_RPMCC
+ bool
+
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config QCOM_A53PLL
+ tristate "MSM8916 A53 PLL"
+ depends on COMMON_CLK_QCOM
+ default ARCH_QCOM
+ help
+ Support for the A53 PLL on MSM8916 devices. It provides
+ the CPU with frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on MSM8916
+ devices.
+
+config QCOM_CLK_APCS_MSM8916
+ tristate "MSM8916 APCS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ default ARCH_QCOM
+ help
+	  Support for the APCS Clock Controller on msm8916 devices. The
+	  APCS manages the mux and divider which feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as msm8916.
+
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
+config APQ_GCC_8084
+ tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8084 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+ tristate "APQ8084 Multimedia Clock Controller"
+ select APQ_GCC_8084
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on apq8084 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_GCC_806X
+ tristate "IPQ806x Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq806x devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_LCC_806X
+ tristate "IPQ806x LPASS Clock Controller"
+ select IPQ_GCC_806X
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on ipq806x devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ S/PDIF, etc.
+
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI i2c, USB,
+ SD/eMMC, display, graphics, camera etc.
+
+config MSM_GCC_8960
+ tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_LCC_8960
+ tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+<<<<<<<
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+=======
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the global clock controller on Qualcomm Technologies, Inc.
+ sdm845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+>>>>>>>
+
+config SPMI_PMIC_CLKDIV
+ tristate "SPMI PMIC clkdiv Support"
+ depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ help
+ This driver supports the clkdiv functionality on the Qualcomm
+ Technologies, Inc. SPMI PMIC. It configures the frequency of
+ clkdiv outputs of the PMIC. These clocks are typically wired
+ through alternate functions on GPIO pins.
+
diff --git a/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/postimage.1 b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/postimage.1
new file mode 100644
index 0000000..cc82557
--- /dev/null
+++ b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/postimage.1
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+// serial@75b1000 {
+// label = "LS-UART0";
+// status = "okay";
+// pinctrl-names = "default", "sleep";
+// pinctrl-0 = <&blsp2_uart2_4pins_default>;
+// pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+// };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+
+		slim_msm: sc {
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default &audio_mclk>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage
new file mode 100644
index 0000000..86550f5
--- /dev/null
+++ b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+// serial@75b1000 {
+// label = "LS-UART0";
+// status = "okay";
+// pinctrl-names = "default", "sleep";
+// pinctrl-0 = <&blsp2_uart2_4pins_default>;
+// pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+// };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+<<<<<<<
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+=======
+ bt_en: bt-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "bt-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 19 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+
+ agnoc@0 {
+ qcom,pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ vddpe1-supply = <&bt_en>;
+>>>>>>>
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+
+		slim_msm: sc {
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default &audio_mclk>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage.1 b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage.1
new file mode 100644
index 0000000..86550f5
--- /dev/null
+++ b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/preimage.1
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+// serial@75b1000 {
+// label = "LS-UART0";
+// status = "okay";
+// pinctrl-names = "default", "sleep";
+// pinctrl-0 = <&blsp2_uart2_4pins_default>;
+// pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+// };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+<<<<<<<
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+=======
+ bt_en: bt-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "bt-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 19 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+
+ agnoc@0 {
+ qcom,pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ vddpe1-supply = <&bt_en>;
+>>>>>>>
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+
+		slim_msm: sc {
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default &audio_mclk>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/thisimage.1 b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/thisimage.1
new file mode 100644
index 0000000..fc156c0
--- /dev/null
+++ b/rr-cache/1113c4083bc0a2b3f76ca5529d22dc317a13669d/thisimage.1
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+ serial@75b1000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+<<<<<<<
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+=======
+ bt_en: bt-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "bt-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 19 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+
+ agnoc@0 {
+ qcom,pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ vddpe1-supply = <&bt_en>;
+>>>>>>>
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/postimage.2 b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/postimage.2
new file mode 100644
index 0000000..3c00765
--- /dev/null
+++ b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/postimage.2
@@ -0,0 +1,22 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+ #clock-cells = <1>;
+ };
+ };
diff --git a/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage
new file mode 100644
index 0000000..e551552
--- /dev/null
+++ b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+<<<<<<<
+- compatible : shall contain "qcom,rpmh-clk-sdm845"
+=======
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+>>>>>>>
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+<<<<<<<
+ rpmh: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+=======
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+>>>>>>>
+ #clock-cells = <1>;
+ };
+ };
diff --git a/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.1 b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.1
new file mode 100644
index 0000000..e551552
--- /dev/null
+++ b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.1
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+<<<<<<<
+- compatible : shall contain "qcom,rpmh-clk-sdm845"
+=======
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+>>>>>>>
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+<<<<<<<
+ rpmh: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+=======
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+>>>>>>>
+ #clock-cells = <1>;
+ };
+ };
diff --git a/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.2 b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.2
new file mode 100644
index 0000000..e551552
--- /dev/null
+++ b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/preimage.2
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+<<<<<<<
+- compatible : shall contain "qcom,rpmh-clk-sdm845"
+=======
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+>>>>>>>
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+<<<<<<<
+ rpmh: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+=======
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+>>>>>>>
+ #clock-cells = <1>;
+ };
+ };
diff --git a/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/thisimage.2 b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/thisimage.2
new file mode 100644
index 0000000..e551552
--- /dev/null
+++ b/rr-cache/1225921e44e1228d6ac5edfc49b9bf583677fd71/thisimage.2
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+<<<<<<<
+- compatible : shall contain "qcom,rpmh-clk-sdm845"
+=======
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+>>>>>>>
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+<<<<<<<
+ rpmh: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+=======
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+>>>>>>>
+ #clock-cells = <1>;
+ };
+ };
diff --git a/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/postimage b/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/postimage
new file mode 100644
index 0000000..ca883c6
--- /dev/null
+++ b/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/postimage
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 MTP board device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 MTP";
+ compatible = "qcom,sdm845-mtp";
+};
+
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ vdd_l7_l12_l14_l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,regulator-initial-voltage = <1100000>;
+ };
+
+ smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ qcom,regulator-initial-voltage = <1904000>;
+ };
+
+ ldo1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,regulator-initial-voltage = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 30000>;
+ };
+
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,regulator-initial-voltage = <1800000>;
+ qcom,headroom-voltage = <56000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo20 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ qcom,regulator-initial-voltage = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_PASS>;
+ };
+ };
+};
+
+&ufsphy {
+ status = "ok";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+};
+
+&ufshc {
+ status = "ok";
+
+ vcc-supply = <&pm8998_l20>;
+ vcc-voltage-level = <2950000 2960000>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+};
diff --git a/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/preimage b/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/preimage
new file mode 100644
index 0000000..8446e7b
--- /dev/null
+++ b/rr-cache/17b1ce0c1269b821be9f793e477f850010223173/preimage
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 MTP board device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 MTP";
+ compatible = "qcom,sdm845-mtp";
+<<<<<<<
+=======
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&soc {
+ geniqup@ac0000 {
+ status = "okay";
+
+ serial@a84000 {
+ status = "okay";
+ };
+
+ i2c@a88000 {
+ clock-frequency = <400000>;
+ status = "okay";
+ };
+ };
+
+ pinctrl@3400000 {
+ qup-i2c10-default {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qup-i2c10-sleep {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup-uart2-default {
+ pinconf_tx {
+ pins = "gpio4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pinconf_rx {
+ pins = "gpio5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ qup-uart2-sleep {
+ pinconf {
+ pins = "gpio4", "gpio5";
+ bias-pull-down;
+ };
+ };
+ };
+
+ phy@1d87000 {
+ status = "ok";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+ };
+
+ ufshc@1d84000 {
+ status = "ok";
+
+ vcc-supply = <&pm8998_l20>;
+ vcc-voltage-level = <2950000 2960000>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ };
+>>>>>>>
+};
+
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ vdd_l7_l12_l14_l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,regulator-initial-voltage = <1100000>;
+ };
+
+ smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ qcom,regulator-initial-voltage = <1904000>;
+ };
+
+ ldo1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,regulator-initial-voltage = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 30000>;
+ };
+
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,regulator-initial-voltage = <1800000>;
+ qcom,headroom-voltage = <56000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo20 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ qcom,regulator-initial-voltage = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_PASS>;
+ };
+ };
+};
diff --git a/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/postimage b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/postimage
new file mode 100644
index 0000000..4f56999
--- /dev/null
+++ b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/postimage
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __Q6_ADM_V2_H__
+#define __Q6_ADM_V2_H__
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define MAX_COPPS_PER_PORT 8
+#define NULL_COPP_TOPOLOGY 0x00010312
+
+/* multiple copp per stream. */
+struct route_payload {
+ int num_copps;
+ int session_id;
+ int copp_idx[MAX_COPPS_PER_PORT];
+ int port_id[MAX_COPPS_PER_PORT];
+};
+
+struct q6copp;
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id);
+int q6adm_close(struct device *dev, struct q6copp *copp);
+int q6adm_get_copp_id(struct q6copp *copp);
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode);
+
+#endif /* __Q6_ADM_V2_H__ */
diff --git a/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/preimage b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/preimage
new file mode 100644
index 0000000..f775d12
--- /dev/null
+++ b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/preimage
@@ -0,0 +1,43 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef __Q6_ADM_V2_H__
+#define __Q6_ADM_V2_H__
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define MAX_COPPS_PER_PORT 8
+#define NULL_COPP_TOPOLOGY 0x00010312
+
+/* multiple copp per stream. */
+struct route_payload {
+ int num_copps;
+ int session_id;
+ int copp_idx[MAX_COPPS_PER_PORT];
+ int port_id[MAX_COPPS_PER_PORT];
+};
+
+<<<<<<<
+int q6pcm_routing_probe(struct device *dev);
+int q6pcm_routing_remove(struct device *dev);
+
+void *q6adm_get_routing_data(struct device *dev);
+void q6adm_set_routing_data(struct device *dev, void *data);
+int q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id);
+int q6adm_close(struct device *dev, int port, int topology, int perf_mode);
+=======
+struct q6copp;
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id);
+int q6adm_close(struct device *dev, struct q6copp *copp);
+int q6adm_get_copp_id(struct q6copp *copp);
+>>>>>>>
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode);
+
+#endif /* __Q6_ADM_V2_H__ */
diff --git a/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/thisimage b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/thisimage
new file mode 100644
index 0000000..f775d12
--- /dev/null
+++ b/rr-cache/1860c4dd358223c839fa46222e1804a4ae4f2e7c/thisimage
@@ -0,0 +1,43 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef __Q6_ADM_V2_H__
+#define __Q6_ADM_V2_H__
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define MAX_COPPS_PER_PORT 8
+#define NULL_COPP_TOPOLOGY 0x00010312
+
+/* multiple copp per stream. */
+struct route_payload {
+ int num_copps;
+ int session_id;
+ int copp_idx[MAX_COPPS_PER_PORT];
+ int port_id[MAX_COPPS_PER_PORT];
+};
+
+<<<<<<<
+int q6pcm_routing_probe(struct device *dev);
+int q6pcm_routing_remove(struct device *dev);
+
+void *q6adm_get_routing_data(struct device *dev);
+void q6adm_set_routing_data(struct device *dev, void *data);
+int q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id);
+int q6adm_close(struct device *dev, int port, int topology, int perf_mode);
+=======
+struct q6copp;
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id);
+int q6adm_close(struct device *dev, struct q6copp *copp);
+int q6adm_get_copp_id(struct q6copp *copp);
+>>>>>>>
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode);
+
+#endif /* __Q6_ADM_V2_H__ */
diff --git a/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/postimage b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/postimage
new file mode 100644
index 0000000..39ce64c
--- /dev/null
+++ b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/postimage
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/of.h>
+
+#include "common.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_regmap **rclks;
+ size_t num_rclks;
+};
+
+const
+struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ /* Default to our fastest rate */
+ return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq);
+
+const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate)
+{
+ const struct freq_tbl *best = NULL;
+
+ for ( ; f->freq; f++) {
+ if (rate >= f->freq)
+ best = f;
+ else
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
+
+int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+{
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == map[i].src)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
+struct regmap *
+qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ void __iomem *base;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(dev, base, desc->config);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_map);
+
+void
+qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = bias_count << PLL_BIAS_COUNT_SHIFT |
+ lock_count << PLL_LOCK_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(map, reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
+}
+EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
+
+static void qcom_cc_gdsc_unregister(void *data)
+{
+ gdsc_unregister(data);
+}
+
+/*
+ * Backwards compatibility with old DTs. Register a pass-through factor 1/1
+ * clock to translate 'path' clk into 'name' clk and register the 'path'
+ * clk as a fixed rate clock if it isn't present.
+ */
+static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate,
+ bool add_factor)
+{
+ struct device_node *node = NULL;
+ struct device_node *clocks_node;
+ struct clk_fixed_factor *factor;
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init_data = { };
+ int ret;
+
+ clocks_node = of_find_node_by_path("/clocks");
+ if (clocks_node) {
+ node = of_get_child_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+ }
+
+ if (!node) {
+ fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -EINVAL;
+
+ fixed->fixed_rate = rate;
+ fixed->hw.init = &init_data;
+
+ init_data.name = path;
+ init_data.ops = &clk_fixed_rate_ops;
+
+ ret = devm_clk_hw_register(dev, &fixed->hw);
+ if (ret)
+ return ret;
+ }
+ of_node_put(node);
+
+ if (add_factor) {
+ factor = devm_kzalloc(dev, sizeof(*factor), GFP_KERNEL);
+ if (!factor)
+ return -EINVAL;
+
+ factor->mult = factor->div = 1;
+ factor->hw.init = &init_data;
+
+ init_data.name = name;
+ init_data.parent_names = &path;
+ init_data.num_parents = 1;
+ init_data.flags = 0;
+ init_data.ops = &clk_fixed_factor_ops;
+
+ ret = devm_clk_hw_register(dev, &factor->hw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate)
+{
+ bool add_factor = true;
+
+ /*
+ * TODO: The RPM clock driver currently does not support the xo clock.
+ * When xo is added to the RPM clock driver, we should change this
+ * function to skip registration of xo factor clocks.
+ */
+
+ return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_board_clk);
+
+int qcom_cc_register_sleep_clk(struct device *dev)
+{
+ return _qcom_cc_register_board_clk(dev, "sleep_clk", "sleep_clk_src",
+ 32768, true);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+
+static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct qcom_cc *cc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= cc->num_rclks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+int qcom_cc_really_probe(struct platform_device *pdev,
+ const struct qcom_cc_desc *desc, struct regmap *regmap)
+{
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+ struct gdsc_desc *scd;
+ size_t num_clks = desc->num_clks;
+ struct clk_regmap **rclks = desc->clks;
+
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = dev->driver->owner;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->regmap = regmap;
+ reset->reset_map = desc->resets;
+
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
+ if (ret)
+ return ret;
+
+ if (desc->gdscs && desc->num_gdscs) {
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
+ if (ret)
+ return ret;
+ }
+
+ cc->rclks = rclks;
+ cc->num_rclks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rclks[i])
+ continue;
+
+ ret = devm_clk_register_regmap(dev, rclks[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+
+int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, desc, regmap);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/preimage b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/preimage
new file mode 100644
index 0000000..78ef38c
--- /dev/null
+++ b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/preimage
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/of.h>
+
+#include "common.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_regmap **rclks;
+ size_t num_rclks;
+};
+
+const
+struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ /* Default to our fastest rate */
+ return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq);
+
+const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate)
+{
+ const struct freq_tbl *best = NULL;
+
+ for ( ; f->freq; f++) {
+ if (rate >= f->freq)
+ best = f;
+ else
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
+
+int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+{
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == map[i].src)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
+struct regmap *
+qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ void __iomem *base;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(dev, base, desc->config);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_map);
+
+void
+qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = bias_count << PLL_BIAS_COUNT_SHIFT |
+ lock_count << PLL_LOCK_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(map, reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
+}
+EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
+
+static void qcom_cc_gdsc_unregister(void *data)
+{
+ gdsc_unregister(data);
+}
+
+/*
+ * Backwards compatibility with old DTs. Register a pass-through factor 1/1
+ * clock to translate 'path' clk into 'name' clk and register the 'path'
+ * clk as a fixed rate clock if it isn't present.
+ */
+static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate,
+ bool add_factor)
+{
+ struct device_node *node = NULL;
+ struct device_node *clocks_node;
+ struct clk_fixed_factor *factor;
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init_data = { };
+ int ret;
+
+ clocks_node = of_find_node_by_path("/clocks");
+ if (clocks_node) {
+ node = of_get_child_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+ }
+
+ if (!node) {
+ fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -EINVAL;
+
+ fixed->fixed_rate = rate;
+ fixed->hw.init = &init_data;
+
+ init_data.name = path;
+ init_data.ops = &clk_fixed_rate_ops;
+
+ ret = devm_clk_hw_register(dev, &fixed->hw);
+ if (ret)
+ return ret;
+ }
+ of_node_put(node);
+
+ if (add_factor) {
+ factor = devm_kzalloc(dev, sizeof(*factor), GFP_KERNEL);
+ if (!factor)
+ return -EINVAL;
+
+ factor->mult = factor->div = 1;
+ factor->hw.init = &init_data;
+
+ init_data.name = name;
+ init_data.parent_names = &path;
+ init_data.num_parents = 1;
+ init_data.flags = 0;
+ init_data.ops = &clk_fixed_factor_ops;
+
+ ret = devm_clk_hw_register(dev, &factor->hw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate)
+{
+ bool add_factor = true;
+
+ /*
+ * TODO: The RPM clock driver currently does not support the xo clock.
+ * When xo is added to the RPM clock driver, we should change this
+ * function to skip registration of xo factor clocks.
+ */
+
+ return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_board_clk);
+
+int qcom_cc_register_sleep_clk(struct device *dev)
+{
+ return _qcom_cc_register_board_clk(dev, "sleep_clk", "sleep_clk_src",
+ 32768, true);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+
+static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct qcom_cc *cc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= cc->num_rclks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+int qcom_cc_really_probe(struct platform_device *pdev,
+ const struct qcom_cc_desc *desc, struct regmap *regmap)
+{
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+ struct gdsc_desc *scd;
+ size_t num_clks = desc->num_clks;
+ struct clk_regmap **rclks = desc->clks;
+
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = dev->driver->owner;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->regmap = regmap;
+ reset->reset_map = desc->resets;
+
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
+ if (ret)
+ return ret;
+
+ if (desc->gdscs && desc->num_gdscs) {
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
+ if (ret)
+ return ret;
+ }
+
+ cc->rclks = rclks;
+ cc->num_rclks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rclks[i])
+ continue;
+
+ ret = devm_clk_register_regmap(dev, rclks[i]);
+ if (ret)
+ return ret;
+ }
+
+<<<<<<<
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
+=======
+ ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+
+int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, desc, regmap);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/thisimage b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/thisimage
new file mode 100644
index 0000000..78ef38c
--- /dev/null
+++ b/rr-cache/196e4598f8fdc3b02404cb22d591e9b5915f7498/thisimage
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/of.h>
+
+#include "common.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_regmap **rclks;
+ size_t num_rclks;
+};
+
+const
+struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ /* Default to our fastest rate */
+ return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq);
+
+const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate)
+{
+ const struct freq_tbl *best = NULL;
+
+ for ( ; f->freq; f++) {
+ if (rate >= f->freq)
+ best = f;
+ else
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
+
+int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+{
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == map[i].src)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
+struct regmap *
+qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ void __iomem *base;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(dev, base, desc->config);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_map);
+
+void
+qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = bias_count << PLL_BIAS_COUNT_SHIFT |
+ lock_count << PLL_LOCK_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(map, reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
+}
+EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
+
+static void qcom_cc_gdsc_unregister(void *data)
+{
+ gdsc_unregister(data);
+}
+
+/*
+ * Backwards compatibility with old DTs. Register a pass-through factor 1/1
+ * clock to translate 'path' clk into 'name' clk and register the 'path'
+ * clk as a fixed rate clock if it isn't present.
+ */
+static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate,
+ bool add_factor)
+{
+ struct device_node *node = NULL;
+ struct device_node *clocks_node;
+ struct clk_fixed_factor *factor;
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init_data = { };
+ int ret;
+
+ clocks_node = of_find_node_by_path("/clocks");
+ if (clocks_node) {
+ node = of_get_child_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+ }
+
+ if (!node) {
+ fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -EINVAL;
+
+ fixed->fixed_rate = rate;
+ fixed->hw.init = &init_data;
+
+ init_data.name = path;
+ init_data.ops = &clk_fixed_rate_ops;
+
+ ret = devm_clk_hw_register(dev, &fixed->hw);
+ if (ret)
+ return ret;
+ }
+ of_node_put(node);
+
+ if (add_factor) {
+ factor = devm_kzalloc(dev, sizeof(*factor), GFP_KERNEL);
+ if (!factor)
+ return -EINVAL;
+
+ factor->mult = factor->div = 1;
+ factor->hw.init = &init_data;
+
+ init_data.name = name;
+ init_data.parent_names = &path;
+ init_data.num_parents = 1;
+ init_data.flags = 0;
+ init_data.ops = &clk_fixed_factor_ops;
+
+ ret = devm_clk_hw_register(dev, &factor->hw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate)
+{
+ bool add_factor = true;
+
+ /*
+ * TODO: The RPM clock driver currently does not support the xo clock.
+ * When xo is added to the RPM clock driver, we should change this
+ * function to skip registration of xo factor clocks.
+ */
+
+ return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_board_clk);
+
+int qcom_cc_register_sleep_clk(struct device *dev)
+{
+ return _qcom_cc_register_board_clk(dev, "sleep_clk", "sleep_clk_src",
+ 32768, true);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+
+static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct qcom_cc *cc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= cc->num_rclks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+int qcom_cc_really_probe(struct platform_device *pdev,
+ const struct qcom_cc_desc *desc, struct regmap *regmap)
+{
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+ struct gdsc_desc *scd;
+ size_t num_clks = desc->num_clks;
+ struct clk_regmap **rclks = desc->clks;
+
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = dev->driver->owner;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->regmap = regmap;
+ reset->reset_map = desc->resets;
+
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
+ if (ret)
+ return ret;
+
+ if (desc->gdscs && desc->num_gdscs) {
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
+ if (ret)
+ return ret;
+ }
+
+ cc->rclks = rclks;
+ cc->num_rclks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rclks[i])
+ continue;
+
+ ret = devm_clk_register_regmap(dev, rclks[i]);
+ if (ret)
+ return ret;
+ }
+
+<<<<<<<
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
+=======
+ ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+
+int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, desc, regmap);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage
new file mode 100644
index 0000000..e43c9f4
--- /dev/null
+++ b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage
@@ -0,0 +1,440 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+<<<<<<<
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define BI_TCXO 0
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 4
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 5
+#define GCC_BOOT_ROM_AHB_CLK 6
+#define GCC_CAMERA_AHB_CLK 7
+#define GCC_CAMERA_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CE1_AHB_CLK 10
+#define GCC_CE1_AXI_CLK 11
+#define GCC_CE1_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 14
+#define GCC_CPUSS_AHB_CLK 15
+#define GCC_CPUSS_AHB_CLK_SRC 16
+#define GCC_CPUSS_DVM_BUS_CLK 17
+#define GCC_CPUSS_GNOC_CLK 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_AXI_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_MEMNOC_GFX_CLK 36
+#define GCC_GPU_SNOC_DVM_GFX_CLK 37
+#define GCC_MSS_AXIS2_CLK 38
+#define GCC_MSS_CFG_AHB_CLK 39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
+#define GCC_MSS_MFAB_AXIS_CLK 41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
+#define GCC_MSS_SNOC_AXI_CLK 43
+#define GCC_PCIE_0_AUX_CLK 44
+#define GCC_PCIE_0_AUX_CLK_SRC 45
+#define GCC_PCIE_0_CFG_AHB_CLK 46
+#define GCC_PCIE_0_CLKREF_CLK 47
+#define GCC_PCIE_0_MSTR_AXI_CLK 48
+#define GCC_PCIE_0_PIPE_CLK 49
+#define GCC_PCIE_0_SLV_AXI_CLK 50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
+#define GCC_PCIE_1_AUX_CLK 52
+#define GCC_PCIE_1_AUX_CLK_SRC 53
+#define GCC_PCIE_1_CFG_AHB_CLK 54
+#define GCC_PCIE_1_CLKREF_CLK 55
+#define GCC_PCIE_1_MSTR_AXI_CLK 56
+#define GCC_PCIE_1_PIPE_CLK 57
+#define GCC_PCIE_1_SLV_AXI_CLK 58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
+#define GCC_PCIE_PHY_AUX_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK 61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_QMIP_CAMERA_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_VIDEO_AHB_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S6_CLK 83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S7_CLK 85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S0_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S1_CLK 89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S2_CLK 91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S3_CLK 93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S4_CLK 95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S5_CLK 97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S6_CLK 99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S7_CLK 101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106
+#define GCC_SDCC2_AHB_CLK 107
+#define GCC_SDCC2_APPS_CLK 108
+#define GCC_SDCC2_APPS_CLK_SRC 109
+#define GCC_SDCC4_AHB_CLK 110
+#define GCC_SDCC4_APPS_CLK 111
+#define GCC_SDCC4_APPS_CLK_SRC 112
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 113
+#define GCC_TSIF_AHB_CLK 114
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 115
+#define GCC_TSIF_REF_CLK 116
+#define GCC_TSIF_REF_CLK_SRC 117
+#define GCC_UFS_CARD_AHB_CLK 118
+#define GCC_UFS_CARD_AXI_CLK 119
+#define GCC_UFS_CARD_AXI_CLK_SRC 120
+#define GCC_UFS_CARD_CLKREF_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK 122
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 123
+#define GCC_UFS_CARD_PHY_AUX_CLK 124
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 125
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 126
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 127
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 129
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 130
+#define GCC_UFS_MEM_CLKREF_CLK 131
+#define GCC_UFS_PHY_AHB_CLK 132
+#define GCC_UFS_PHY_AXI_CLK 133
+#define GCC_UFS_PHY_AXI_CLK_SRC 134
+#define GCC_UFS_PHY_ICE_CORE_CLK 135
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 136
+#define GCC_UFS_PHY_PHY_AUX_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 138
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 139
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 140
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 142
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_SLEEP_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK 149
+#define GCC_USB30_SEC_MASTER_CLK_SRC 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 151
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 152
+#define GCC_USB30_SEC_SLEEP_CLK 153
+#define GCC_USB3_PRIM_CLKREF_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 156
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 157
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 158
+#define GCC_USB3_SEC_CLKREF_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK 160
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 161
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 162
+#define GCC_USB3_SEC_PHY_PIPE_CLK 163
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 164
+#define GCC_VIDEO_AHB_CLK 165
+#define GCC_VIDEO_AXI_CLK 166
+#define GCC_VIDEO_XO_CLK 167
+#define GPLL0 168
+#define GPLL0_OUT_EVEN 169
+#define GPLL0_OUT_MAIN 170
+#define GCC_GPU_IREF_CLK 171
+#define GCC_SDCC1_AHB_CLK 172
+#define GCC_SDCC1_APPS_CLK 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_APPS_CLK_SRC 175
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 176
+#define GCC_APC_VS_CLK 177
+#define GCC_GPU_VS_CLK 178
+#define GCC_MSS_VS_CLK 179
+#define GCC_VDDA_VS_CLK 180
+#define GCC_VDDCX_VS_CLK 181
+#define GCC_VDDMX_VS_CLK 182
+#define GCC_VS_CTRL_AHB_CLK 183
+#define GCC_VS_CTRL_CLK 184
+#define GCC_VS_CTRL_CLK_SRC 185
+#define GCC_VSENSOR_CLK_SRC 186
+#define GPLL4 187
+
+/* GCC reset clocks */
+=======
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+>>>>>>>
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.1 b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.1
new file mode 100644
index 0000000..e43c9f4
--- /dev/null
+++ b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.1
@@ -0,0 +1,440 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+<<<<<<<
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define BI_TCXO 0
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 4
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 5
+#define GCC_BOOT_ROM_AHB_CLK 6
+#define GCC_CAMERA_AHB_CLK 7
+#define GCC_CAMERA_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CE1_AHB_CLK 10
+#define GCC_CE1_AXI_CLK 11
+#define GCC_CE1_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 14
+#define GCC_CPUSS_AHB_CLK 15
+#define GCC_CPUSS_AHB_CLK_SRC 16
+#define GCC_CPUSS_DVM_BUS_CLK 17
+#define GCC_CPUSS_GNOC_CLK 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_AXI_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_MEMNOC_GFX_CLK 36
+#define GCC_GPU_SNOC_DVM_GFX_CLK 37
+#define GCC_MSS_AXIS2_CLK 38
+#define GCC_MSS_CFG_AHB_CLK 39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
+#define GCC_MSS_MFAB_AXIS_CLK 41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
+#define GCC_MSS_SNOC_AXI_CLK 43
+#define GCC_PCIE_0_AUX_CLK 44
+#define GCC_PCIE_0_AUX_CLK_SRC 45
+#define GCC_PCIE_0_CFG_AHB_CLK 46
+#define GCC_PCIE_0_CLKREF_CLK 47
+#define GCC_PCIE_0_MSTR_AXI_CLK 48
+#define GCC_PCIE_0_PIPE_CLK 49
+#define GCC_PCIE_0_SLV_AXI_CLK 50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
+#define GCC_PCIE_1_AUX_CLK 52
+#define GCC_PCIE_1_AUX_CLK_SRC 53
+#define GCC_PCIE_1_CFG_AHB_CLK 54
+#define GCC_PCIE_1_CLKREF_CLK 55
+#define GCC_PCIE_1_MSTR_AXI_CLK 56
+#define GCC_PCIE_1_PIPE_CLK 57
+#define GCC_PCIE_1_SLV_AXI_CLK 58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
+#define GCC_PCIE_PHY_AUX_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK 61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_QMIP_CAMERA_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_VIDEO_AHB_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S6_CLK 83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S7_CLK 85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S0_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S1_CLK 89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S2_CLK 91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S3_CLK 93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S4_CLK 95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S5_CLK 97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S6_CLK 99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S7_CLK 101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106
+#define GCC_SDCC2_AHB_CLK 107
+#define GCC_SDCC2_APPS_CLK 108
+#define GCC_SDCC2_APPS_CLK_SRC 109
+#define GCC_SDCC4_AHB_CLK 110
+#define GCC_SDCC4_APPS_CLK 111
+#define GCC_SDCC4_APPS_CLK_SRC 112
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 113
+#define GCC_TSIF_AHB_CLK 114
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 115
+#define GCC_TSIF_REF_CLK 116
+#define GCC_TSIF_REF_CLK_SRC 117
+#define GCC_UFS_CARD_AHB_CLK 118
+#define GCC_UFS_CARD_AXI_CLK 119
+#define GCC_UFS_CARD_AXI_CLK_SRC 120
+#define GCC_UFS_CARD_CLKREF_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK 122
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 123
+#define GCC_UFS_CARD_PHY_AUX_CLK 124
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 125
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 126
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 127
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 129
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 130
+#define GCC_UFS_MEM_CLKREF_CLK 131
+#define GCC_UFS_PHY_AHB_CLK 132
+#define GCC_UFS_PHY_AXI_CLK 133
+#define GCC_UFS_PHY_AXI_CLK_SRC 134
+#define GCC_UFS_PHY_ICE_CORE_CLK 135
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 136
+#define GCC_UFS_PHY_PHY_AUX_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 138
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 139
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 140
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 142
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_SLEEP_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK 149
+#define GCC_USB30_SEC_MASTER_CLK_SRC 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 151
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 152
+#define GCC_USB30_SEC_SLEEP_CLK 153
+#define GCC_USB3_PRIM_CLKREF_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 156
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 157
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 158
+#define GCC_USB3_SEC_CLKREF_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK 160
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 161
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 162
+#define GCC_USB3_SEC_PHY_PIPE_CLK 163
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 164
+#define GCC_VIDEO_AHB_CLK 165
+#define GCC_VIDEO_AXI_CLK 166
+#define GCC_VIDEO_XO_CLK 167
+#define GPLL0 168
+#define GPLL0_OUT_EVEN 169
+#define GPLL0_OUT_MAIN 170
+#define GCC_GPU_IREF_CLK 171
+#define GCC_SDCC1_AHB_CLK 172
+#define GCC_SDCC1_APPS_CLK 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_APPS_CLK_SRC 175
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 176
+#define GCC_APC_VS_CLK 177
+#define GCC_GPU_VS_CLK 178
+#define GCC_MSS_VS_CLK 179
+#define GCC_VDDA_VS_CLK 180
+#define GCC_VDDCX_VS_CLK 181
+#define GCC_VDDMX_VS_CLK 182
+#define GCC_VS_CTRL_AHB_CLK 183
+#define GCC_VS_CTRL_CLK 184
+#define GCC_VS_CTRL_CLK_SRC 185
+#define GCC_VSENSOR_CLK_SRC 186
+#define GPLL4 187
+
+/* GCC reset clocks */
+=======
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+>>>>>>>
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.2 b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.2
new file mode 100644
index 0000000..e43c9f4
--- /dev/null
+++ b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.2
@@ -0,0 +1,440 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+<<<<<<<
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define BI_TCXO 0
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 4
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 5
+#define GCC_BOOT_ROM_AHB_CLK 6
+#define GCC_CAMERA_AHB_CLK 7
+#define GCC_CAMERA_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CE1_AHB_CLK 10
+#define GCC_CE1_AXI_CLK 11
+#define GCC_CE1_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 14
+#define GCC_CPUSS_AHB_CLK 15
+#define GCC_CPUSS_AHB_CLK_SRC 16
+#define GCC_CPUSS_DVM_BUS_CLK 17
+#define GCC_CPUSS_GNOC_CLK 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_AXI_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_MEMNOC_GFX_CLK 36
+#define GCC_GPU_SNOC_DVM_GFX_CLK 37
+#define GCC_MSS_AXIS2_CLK 38
+#define GCC_MSS_CFG_AHB_CLK 39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
+#define GCC_MSS_MFAB_AXIS_CLK 41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
+#define GCC_MSS_SNOC_AXI_CLK 43
+#define GCC_PCIE_0_AUX_CLK 44
+#define GCC_PCIE_0_AUX_CLK_SRC 45
+#define GCC_PCIE_0_CFG_AHB_CLK 46
+#define GCC_PCIE_0_CLKREF_CLK 47
+#define GCC_PCIE_0_MSTR_AXI_CLK 48
+#define GCC_PCIE_0_PIPE_CLK 49
+#define GCC_PCIE_0_SLV_AXI_CLK 50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
+#define GCC_PCIE_1_AUX_CLK 52
+#define GCC_PCIE_1_AUX_CLK_SRC 53
+#define GCC_PCIE_1_CFG_AHB_CLK 54
+#define GCC_PCIE_1_CLKREF_CLK 55
+#define GCC_PCIE_1_MSTR_AXI_CLK 56
+#define GCC_PCIE_1_PIPE_CLK 57
+#define GCC_PCIE_1_SLV_AXI_CLK 58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
+#define GCC_PCIE_PHY_AUX_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK 61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_QMIP_CAMERA_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_VIDEO_AHB_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S6_CLK 83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S7_CLK 85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S0_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S1_CLK 89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S2_CLK 91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S3_CLK 93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S4_CLK 95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S5_CLK 97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S6_CLK 99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S7_CLK 101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106
+#define GCC_SDCC2_AHB_CLK 107
+#define GCC_SDCC2_APPS_CLK 108
+#define GCC_SDCC2_APPS_CLK_SRC 109
+#define GCC_SDCC4_AHB_CLK 110
+#define GCC_SDCC4_APPS_CLK 111
+#define GCC_SDCC4_APPS_CLK_SRC 112
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 113
+#define GCC_TSIF_AHB_CLK 114
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 115
+#define GCC_TSIF_REF_CLK 116
+#define GCC_TSIF_REF_CLK_SRC 117
+#define GCC_UFS_CARD_AHB_CLK 118
+#define GCC_UFS_CARD_AXI_CLK 119
+#define GCC_UFS_CARD_AXI_CLK_SRC 120
+#define GCC_UFS_CARD_CLKREF_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK 122
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 123
+#define GCC_UFS_CARD_PHY_AUX_CLK 124
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 125
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 126
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 127
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 129
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 130
+#define GCC_UFS_MEM_CLKREF_CLK 131
+#define GCC_UFS_PHY_AHB_CLK 132
+#define GCC_UFS_PHY_AXI_CLK 133
+#define GCC_UFS_PHY_AXI_CLK_SRC 134
+#define GCC_UFS_PHY_ICE_CORE_CLK 135
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 136
+#define GCC_UFS_PHY_PHY_AUX_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 138
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 139
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 140
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 142
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_SLEEP_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK 149
+#define GCC_USB30_SEC_MASTER_CLK_SRC 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 151
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 152
+#define GCC_USB30_SEC_SLEEP_CLK 153
+#define GCC_USB3_PRIM_CLKREF_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 156
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 157
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 158
+#define GCC_USB3_SEC_CLKREF_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK 160
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 161
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 162
+#define GCC_USB3_SEC_PHY_PIPE_CLK 163
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 164
+#define GCC_VIDEO_AHB_CLK 165
+#define GCC_VIDEO_AXI_CLK 166
+#define GCC_VIDEO_XO_CLK 167
+#define GPLL0 168
+#define GPLL0_OUT_EVEN 169
+#define GPLL0_OUT_MAIN 170
+#define GCC_GPU_IREF_CLK 171
+#define GCC_SDCC1_AHB_CLK 172
+#define GCC_SDCC1_APPS_CLK 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_APPS_CLK_SRC 175
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 176
+#define GCC_APC_VS_CLK 177
+#define GCC_GPU_VS_CLK 178
+#define GCC_MSS_VS_CLK 179
+#define GCC_VDDA_VS_CLK 180
+#define GCC_VDDCX_VS_CLK 181
+#define GCC_VDDMX_VS_CLK 182
+#define GCC_VS_CTRL_AHB_CLK 183
+#define GCC_VS_CTRL_CLK 184
+#define GCC_VS_CTRL_CLK_SRC 185
+#define GCC_VSENSOR_CLK_SRC 186
+#define GPLL4 187
+
+/* GCC reset clocks */
+=======
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+>>>>>>>
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.3 b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.3
new file mode 100644
index 0000000..e43c9f4
--- /dev/null
+++ b/rr-cache/1a12c9e7bc9e61f3fd259e77b7e798a449dd5e0d/preimage.3
@@ -0,0 +1,440 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+<<<<<<<
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define BI_TCXO 0
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 4
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 5
+#define GCC_BOOT_ROM_AHB_CLK 6
+#define GCC_CAMERA_AHB_CLK 7
+#define GCC_CAMERA_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CE1_AHB_CLK 10
+#define GCC_CE1_AXI_CLK 11
+#define GCC_CE1_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 14
+#define GCC_CPUSS_AHB_CLK 15
+#define GCC_CPUSS_AHB_CLK_SRC 16
+#define GCC_CPUSS_DVM_BUS_CLK 17
+#define GCC_CPUSS_GNOC_CLK 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_AXI_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_MEMNOC_GFX_CLK 36
+#define GCC_GPU_SNOC_DVM_GFX_CLK 37
+#define GCC_MSS_AXIS2_CLK 38
+#define GCC_MSS_CFG_AHB_CLK 39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
+#define GCC_MSS_MFAB_AXIS_CLK 41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
+#define GCC_MSS_SNOC_AXI_CLK 43
+#define GCC_PCIE_0_AUX_CLK 44
+#define GCC_PCIE_0_AUX_CLK_SRC 45
+#define GCC_PCIE_0_CFG_AHB_CLK 46
+#define GCC_PCIE_0_CLKREF_CLK 47
+#define GCC_PCIE_0_MSTR_AXI_CLK 48
+#define GCC_PCIE_0_PIPE_CLK 49
+#define GCC_PCIE_0_SLV_AXI_CLK 50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
+#define GCC_PCIE_1_AUX_CLK 52
+#define GCC_PCIE_1_AUX_CLK_SRC 53
+#define GCC_PCIE_1_CFG_AHB_CLK 54
+#define GCC_PCIE_1_CLKREF_CLK 55
+#define GCC_PCIE_1_MSTR_AXI_CLK 56
+#define GCC_PCIE_1_PIPE_CLK 57
+#define GCC_PCIE_1_SLV_AXI_CLK 58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
+#define GCC_PCIE_PHY_AUX_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK 61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_QMIP_CAMERA_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_VIDEO_AHB_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S6_CLK 83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S7_CLK 85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S0_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S1_CLK 89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S2_CLK 91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S3_CLK 93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S4_CLK 95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S5_CLK 97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S6_CLK 99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S7_CLK 101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106
+#define GCC_SDCC2_AHB_CLK 107
+#define GCC_SDCC2_APPS_CLK 108
+#define GCC_SDCC2_APPS_CLK_SRC 109
+#define GCC_SDCC4_AHB_CLK 110
+#define GCC_SDCC4_APPS_CLK 111
+#define GCC_SDCC4_APPS_CLK_SRC 112
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 113
+#define GCC_TSIF_AHB_CLK 114
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 115
+#define GCC_TSIF_REF_CLK 116
+#define GCC_TSIF_REF_CLK_SRC 117
+#define GCC_UFS_CARD_AHB_CLK 118
+#define GCC_UFS_CARD_AXI_CLK 119
+#define GCC_UFS_CARD_AXI_CLK_SRC 120
+#define GCC_UFS_CARD_CLKREF_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK 122
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 123
+#define GCC_UFS_CARD_PHY_AUX_CLK 124
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 125
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 126
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 127
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 129
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 130
+#define GCC_UFS_MEM_CLKREF_CLK 131
+#define GCC_UFS_PHY_AHB_CLK 132
+#define GCC_UFS_PHY_AXI_CLK 133
+#define GCC_UFS_PHY_AXI_CLK_SRC 134
+#define GCC_UFS_PHY_ICE_CORE_CLK 135
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 136
+#define GCC_UFS_PHY_PHY_AUX_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 138
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 139
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 140
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 142
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_SLEEP_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK 149
+#define GCC_USB30_SEC_MASTER_CLK_SRC 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 151
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 152
+#define GCC_USB30_SEC_SLEEP_CLK 153
+#define GCC_USB3_PRIM_CLKREF_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 156
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 157
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 158
+#define GCC_USB3_SEC_CLKREF_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK 160
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 161
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 162
+#define GCC_USB3_SEC_PHY_PIPE_CLK 163
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 164
+#define GCC_VIDEO_AHB_CLK 165
+#define GCC_VIDEO_AXI_CLK 166
+#define GCC_VIDEO_XO_CLK 167
+#define GPLL0 168
+#define GPLL0_OUT_EVEN 169
+#define GPLL0_OUT_MAIN 170
+#define GCC_GPU_IREF_CLK 171
+#define GCC_SDCC1_AHB_CLK 172
+#define GCC_SDCC1_APPS_CLK 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_APPS_CLK_SRC 175
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 176
+#define GCC_APC_VS_CLK 177
+#define GCC_GPU_VS_CLK 178
+#define GCC_MSS_VS_CLK 179
+#define GCC_VDDA_VS_CLK 180
+#define GCC_VDDCX_VS_CLK 181
+#define GCC_VDDMX_VS_CLK 182
+#define GCC_VS_CTRL_AHB_CLK 183
+#define GCC_VS_CTRL_CLK 184
+#define GCC_VS_CTRL_CLK_SRC 185
+#define GCC_VSENSOR_CLK_SRC 186
+#define GPLL4 187
+
+/* GCC reset clocks */
+=======
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+>>>>>>>
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/postimage b/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/postimage
new file mode 100644
index 0000000..bdee36c
--- /dev/null
+++ b/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/postimage
@@ -0,0 +1,1570 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ zap_shader_region: gpu@8f200000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x8f200000 0 0x2300000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ msi_alias0: interrupt-controller@9bd0000 {
+ compatible = "qcom,gic-msi-aliases";
+ reg = <0x9bd0000 0x1000>;
+ msi-controller;
+ arm,spi-ranges = <544 96>;
+ };
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+		blsp2_spi5: spi@75ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ msi-parent = <&msi_alias0>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ msi-parent = <&msi_alias0>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ msi-parent = <&msi_alias0>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+				pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 334 IRQ_TYPE_LEVEL_HIGH>,
+ <0 329 IRQ_TYPE_LEVEL_HIGH>,
+ <0 330 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "okay";
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+			/*
+			 * This is a safe speed for bring up in all bin levels.
+			 * This isn't the fastest the chip can go, but we can
+			 * get there eventually.
+			 */
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <510000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ #sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+
+ qcom,sps {
+ compatible = "qcom,msm_sps_4k";
+ qcom,device-type = <3>;
+ qcom,pipe-attr-ee;
+ };
+
+		slim_msm: sc {
+ cell-index = <1>;
+ compatible = "qcom,slim-ngd";
+			reg = <0x91c0000 0x2c000>,
+ <0x9184000 0x32000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>,
+ <0 164 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ qcom,apps-ch-pipes = <0x60000000>;
+ qcom,ea-pc = <0x160>;
+
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ qcom,gpio-int2 = <&msmgpio 54 0>;
+ qcom,cdc-reset-gpio = <&msmgpio 64 0>;
+
+ compatible = "qcom,tasha-slim-pgd";
+ elemental-addr = [00 01 A0 01 17 02];
+
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+
+ vdd-buck-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ buck-sido-supply = <&pm8994_s4>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ vdd-tx-h-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ vdd-rx-h-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ vddpx-1-supply = <&pm8994_s4>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+
+ qcom,cdc-slim-ifd = "tasha-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
+
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+ qcom,clk1-gpio = <&pm8994_gpios 15 0>;
+			wcd9335: wcd {
+ compatible = "qcom,wcd9335";
+ #sound-dai-cells = <1>;
+ };
+ };
+ };
+
+ lpass_q6_smmu: arm,smmu-lpass_q6@1600000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0x1600000 0x20000>;
+ #iommu-cells = <1>;
+ power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
+ <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "iface", "bus";
+ status = "okay";
+ };
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+ #sound-dai-cells = <0>;
+ };
+
+ };
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/preimage b/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/preimage
new file mode 100644
index 0000000..8388aa3
--- /dev/null
+++ b/rr-cache/1aa2efd67a58fd95dfe8b312372912124fd54dc4/preimage
@@ -0,0 +1,1653 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ zap_shader_region: gpu@8f200000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x8f200000 0 0x2300000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ msi_alias0: interrupt-controller@9bd0000 {
+ compatible = "qcom,gic-msi-aliases";
+ reg = <0x9bd0000 0x1000>;
+ msi-controller;
+ arm,spi-ranges = <544 96>;
+ };
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+		blsp2_spi5: spi@75ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ msi-parent = <&msi_alias0>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ msi-parent = <&msi_alias0>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ msi-parent = <&msi_alias0>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+				pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+
+<<<<<<<
+=======
+ ufsphy1: ufsphy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ power-domains = <&gcc UFS_GDSC>;
+
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <0 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy1>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+>>>>>>>
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 334 IRQ_TYPE_LEVEL_HIGH>,
+ <0 329 IRQ_TYPE_LEVEL_HIGH>,
+ <0 330 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "okay";
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+		/*
+		 * This is a safe speed for bring-up in all bin levels.
+		 * It isn't the fastest the chip can go, but we can get
+		 * there eventually.
+		 */
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <510000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ #sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+
+ qcom,sps {
+ compatible = "qcom,msm_sps_4k";
+ qcom,device-type = <3>;
+ qcom,pipe-attr-ee;
+ };
+
+	slim_msm: sc {
+ cell-index = <1>;
+ compatible = "qcom,slim-ngd";
+ reg = <0x91c0000 0x2C000>,
+ <0x9184000 0x32000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>,
+ <0 164 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ qcom,apps-ch-pipes = <0x60000000>;
+ qcom,ea-pc = <0x160>;
+
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ qcom,gpio-int2 = <&msmgpio 54 0>;
+ qcom,cdc-reset-gpio = <&msmgpio 64 0>;
+
+ compatible = "qcom,tasha-slim-pgd";
+ elemental-addr = [00 01 A0 01 17 02];
+
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+
+ vdd-buck-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ buck-sido-supply = <&pm8994_s4>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ vdd-tx-h-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ vdd-rx-h-supply = <&pm8994_s4>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ vddpx-1-supply = <&pm8994_s4>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+
+ qcom,cdc-slim-ifd = "tasha-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
+
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+ qcom,clk1-gpio = <&pm8994_gpios 15 0>;
+			wcd9335: wcd {
+ compatible = "qcom,wcd9335";
+ #sound-dai-cells = <1>;
+ };
+ };
+ };
+
+ lpass_q6_smmu: arm,smmu-lpass_q6@1600000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0x1600000 0x20000>;
+ #iommu-cells = <1>;
+ power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
+ <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "iface", "bus";
+ status = "okay";
+ };
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+ #sound-dai-cells = <0>;
+ };
+
+ };
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/postimage b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/postimage
new file mode 100644
index 0000000..2178eb9
--- /dev/null
+++ b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/postimage
@@ -0,0 +1,33 @@
+Qualcomm Audio Stream Manager (Q6ASM) binding
+
+Q6ASM is one of the APR audio services on the Q6DSP.
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+In addition, the node must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+	Definition: must be "qcom,q6asm-v<MAJOR-NUMBER>.<MINOR-NUMBER>",
+		    or "qcom,q6asm", in which case the version number is
+		    queried from the DSP.
+		    Example: "qcom,q6asm-v2.0"
+
+= ASM DAIs (Digital Audio Interface)
+The "dais" subnode of the ASM node carries the DAI-specific configuration.
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+= EXAMPLE
+
+q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ q6asmdai: dais {
+ #sound-dai-cells = <1>;
+ };
+};
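+
+A machine (sound card) node consumes one of these DAIs through the single
+sound-dai cell, which selects the DAI/stream. The fragment below is only a
+sketch; the sound-card node layout and the stream index are assumptions and
+not part of this binding:
+
+sound {
+	dai-link0 {
+		cpu {
+			sound-dai = <&q6asmdai 0>;
+		};
+	};
+};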
diff --git a/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/preimage b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/preimage
new file mode 100644
index 0000000..4d307d1
--- /dev/null
+++ b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/preimage
@@ -0,0 +1,66 @@
+Qualcomm Audio Stream Manager (Q6ASM) binding
+
+Q6ASM is one of the APR audio service on Q6DSP.
+<<<<<<<
+Please refer to qcom,apr.txt for details of the common apr service bindings
+=======
+Please refer to qcom,apr.txt for details of the coommon apr service bindings
+>>>>>>>
+used by the apr service device.
+
+- but must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,asm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,asm-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 7 for Audio Stream Manager Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "ASM"
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 1
+
+
+= EXAMPLE
+
+q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+=======
+ Definition: must be "qcom,q6asm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ Or "qcom,q6asm" where the version number can be queried
+ from DSP.
+ example "qcom,q6asm-v2.0"
+
+= ASM DAIs (Digial Audio Interface)
+"dais" subnode of the ASM node represents dai specific configuration
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+= EXAMPLE
+
+q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ q6asmdai: dais {
+ #sound-dai-cells = <1>;
+ };
+>>>>>>>
+};
diff --git a/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/thisimage b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/thisimage
new file mode 100644
index 0000000..4d307d1
--- /dev/null
+++ b/rr-cache/1c6069db9c53a786586220741b0b3dd4006abdea/thisimage
@@ -0,0 +1,66 @@
+Qualcomm Audio Stream Manager (Q6ASM) binding
+
+Q6ASM is one of the APR audio service on Q6DSP.
+<<<<<<<
+Please refer to qcom,apr.txt for details of the common apr service bindings
+=======
+Please refer to qcom,apr.txt for details of the coommon apr service bindings
+>>>>>>>
+used by the apr service device.
+
+- but must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,asm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,asm-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 7 for Audio Stream Manager Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "ASM"
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 1
+
+
+= EXAMPLE
+
+q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+=======
+ Definition: must be "qcom,q6asm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ Or "qcom,q6asm" where the version number can be queried
+ from DSP.
+ example "qcom,q6asm-v2.0"
+
+= ASM DAIs (Digial Audio Interface)
+"dais" subnode of the ASM node represents dai specific configuration
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+= EXAMPLE
+
+q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ q6asmdai: dais {
+ #sound-dai-cells = <1>;
+ };
+>>>>>>>
+};
diff --git a/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/postimage b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/postimage
new file mode 100644
index 0000000..2135b7c
--- /dev/null
+++ b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/postimage
@@ -0,0 +1,221 @@
+* ARM Generic Interrupt Controller, version 3
+
+AArch64 SMP cores are often associated with a GICv3, providing Private
+Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
+Software Generated Interrupts (SGI), and Locality-specific Peripheral
+Interrupts (LPI).
+
+Main node required properties:
+
+- compatible : should at least contain "arm,gic-v3".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. Must be a single cell with a value of at least 3.
+ If the system requires describing PPI affinity, then the value must
+ be at least 4.
+
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts. Other values are reserved for future use.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = edge triggered
+ 4 = level triggered
+
+ The 4th cell is a phandle to a node describing a set of CPUs this
+ interrupt is affine to. The interrupt must be a PPI, and the node
+ pointed must be a subnode of the "ppi-partitions" subnode. For
+  interrupt types other than PPI or PPIs that are not partitioned,
+ this cell must be zero. See the "ppi-partitions" node description
+ below.
+
+ Cells 5 and beyond are reserved for future use and must have a value
+ of 0 if present.
+
+- reg : Specifies the base physical address(es) and size of the GIC
+ registers, in the following order:
+ - GIC Distributor interface (GICD)
+ - GIC Redistributors (GICR), one range per redistributor region
+ - GIC CPU interface (GICC)
+ - GIC Hypervisor interface (GICH)
+ - GIC Virtual CPU interface (GICV)
+
+ GICC, GICH and GICV are optional.
+
+- interrupts : Interrupt source of the VGIC maintenance interrupt.
+
+Optional
+
+- redistributor-stride : If using padding pages, specifies the stride
+ of consecutive redistributors. Must be a multiple of 64kB.
+
+- #redistributor-regions: The number of independent contiguous regions
+ occupied by the redistributors. Required if more than one such
+ region is present.
+
+- msi-controller: Boolean property. Identifies the node as an MSI
+  controller. Only present if the Message Based Interrupt
+  functionality is being exposed by the HW, and the mbi-ranges
+  property is present.
+
+- mbi-ranges: A list of pairs <intid span>, where "intid" is the first
+  SPI of a range that can be used as an MBI, and "span" the size of that
+ range. Multiple ranges can be provided. Requires "msi-controller" to
+ be set.
+
+- mbi-alias: Address property. Base address of an alias of the GICD
+ region containing only the {SET,CLR}SPI registers to be used if
+ isolation is required, and if supported by the HW.
+
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+
+- arm,spi-ranges : Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+
+Sub-nodes:
+
+PPI affinity can be expressed as a single "ppi-partitions" node,
+containing a set of sub-nodes, each with the following property:
+- affinity: Should be a list of phandles to CPU nodes (as described in
+Documentation/devicetree/bindings/arm/cpus.txt).
+
+GICv3 has one or more Interrupt Translation Services (ITS) that are
+used to route Message Signalled Interrupts (MSI) to the CPUs.
+
+These nodes must have the following properties:
+- compatible : Should at least contain "arm,gic-v3-its".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the ITS
+ registers.
+
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
+The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS
+nodes.
+
+GICv3 has an IMPLEMENTATION DEFINED set of aliases for message-based interrupt
+requests.
+
+These nodes must have the following properties:
+- compatible : Should at least contain one of
+ "qcom,gic-msi-aliases".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the message-based
+ interrupt request registers.
+- arm,spi-ranges: Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+
+Note: The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS and
+alias message-based interrupt nodes.
+
+Examples:
+
+ gic: interrupt-controller@2cf00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>, // GICD
+ <0x0 0x2f100000 0 0x200000>, // GICR
+ <0x0 0x2c000000 0 0x2000>, // GICC
+ <0x0 0x2c010000 0 0x2000>, // GICH
+ <0x0 0x2c020000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ msi-controller;
+ mbi-ranges = <256 128>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+ };
+
+ gic: interrupt-controller@2c010000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ redistributor-stride = <0x0 0x40000>; // 256kB stride
+ #redistributor-regions = <2>;
+ reg = <0x0 0x2c010000 0 0x10000>, // GICD
+ <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
+	      <0x0 0x2e000000 0 0x800000>,	// GICR 2: CPUs 32-63
+ <0x0 0x2c040000 0 0x2000>, // GICC
+ <0x0 0x2c060000 0 0x2000>, // GICH
+ <0x0 0x2c080000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+
+ gic-its@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c400000 0 0x20000>;
+ };
+
+ ppi-partitions {
+ part0: interrupt-partition-0 {
+ affinity = <&cpu0 &cpu2>;
+ };
+
+ part1: interrupt-partition-1 {
+ affinity = <&cpu1 &cpu3>;
+ };
+ };
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <1 9 4>;
+
+ msi_alias0: interrupt-controller@9bd0000 {
+ compatible = "qcom,gic-msi-aliases";
+ reg = <0x9bd0000 0x1000>;
+ msi-controller;
+ #msi-cells = <1>;
+ arm,spi-ranges = <544 96>;
+ };
+ };
+
+ device@0 {
+ reg = <0 0 0 4>;
+ interrupts = <1 1 4 &part0>;
+ };
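+
+A peripheral that signals MSIs through the alias node above references it
+with msi-parent, as the PCIe controllers elsewhere in this tree do. The
+node below is an illustrative sketch only, not part of this binding:
+
+	device@600000 {
+		reg = <0x600000 0x1000>;
+		msi-parent = <&msi_alias0>;
+	};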
diff --git a/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/preimage b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/preimage
new file mode 100644
index 0000000..ebfd81f
--- /dev/null
+++ b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/preimage
@@ -0,0 +1,223 @@
+* ARM Generic Interrupt Controller, version 3
+
+AArch64 SMP cores are often associated with a GICv3, providing Private
+Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
+Software Generated Interrupts (SGI), and Locality-specific Peripheral
+Interrupts (LPI).
+
+Main node required properties:
+
+- compatible : should at least contain "arm,gic-v3".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. Must be a single cell with a value of at least 3.
+ If the system requires describing PPI affinity, then the value must
+ be at least 4.
+
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts. Other values are reserved for future use.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = edge triggered
+ 4 = level triggered
+
+ The 4th cell is a phandle to a node describing a set of CPUs this
+ interrupt is affine to. The interrupt must be a PPI, and the node
+ pointed must be a subnode of the "ppi-partitions" subnode. For
+ interrupt types other than PPI or PPIs that are not partitionned,
+ this cell must be zero. See the "ppi-partitions" node description
+ below.
+
+ Cells 5 and beyond are reserved for future use and must have a value
+ of 0 if present.
+
+- reg : Specifies base physical address(s) and size of the GIC
+ registers, in the following order:
+ - GIC Distributor interface (GICD)
+ - GIC Redistributors (GICR), one range per redistributor region
+ - GIC CPU interface (GICC)
+ - GIC Hypervisor interface (GICH)
+ - GIC Virtual CPU interface (GICV)
+
+ GICC, GICH and GICV are optional.
+
+- interrupts : Interrupt source of the VGIC maintenance interrupt.
+
+Optional
+
+- redistributor-stride : If using padding pages, specifies the stride
+ of consecutive redistributors. Must be a multiple of 64kB.
+
+- #redistributor-regions: The number of independent contiguous regions
+ occupied by the redistributors. Required if more than one such
+ region is present.
+
+<<<<<<<
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+
+- arm,spi-ranges : Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+=======
+- msi-controller: Boolean property. Identifies the node as an MSI
+ controller. Only present if the Message Based Interrupt
+ functionnality is being exposed by the HW, and the mbi-ranges
+ property present.
+
+- mbi-ranges: A list of pairs <intid span>, where "intid" is the first
+ SPI of a range that can be used an MBI, and "span" the size of that
+ range. Multiple ranges can be provided. Requires "msi-controller" to
+ be set.
+
+- mbi-alias: Address property. Base address of an alias of the GICD
+ region containing only the {SET,CLR}SPI registers to be used if
+ isolation is required, and if supported by the HW.
+>>>>>>>
+
+Sub-nodes:
+
+PPI affinity can be expressed as a single "ppi-partitions" node,
+containing a set of sub-nodes, each with the following property:
+- affinity: Should be a list of phandles to CPU nodes (as described in
+Documentation/devicetree/bindings/arm/cpus.txt).
+
+GICv3 has one or more Interrupt Translation Services (ITS) that are
+used to route Message Signalled Interrupts (MSI) to the CPUs.
+
+These nodes must have the following properties:
+- compatible : Should at least contain "arm,gic-v3-its".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the ITS
+ registers.
+
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
+The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS
+nodes.
+
+GICv3 has an IMPLEMENTATION DEFINED set of aliases for message-based interrupt
+requests.
+
+These nodes must have the following properties:
+- compatible : Should at least contain one of
+ "qcom,gic-msi-aliases".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the message-based
+ interrupt request registers.
+- arm,spi-ranges: Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+
+Note: The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS and
+alias message-based interrupt nodes.
+
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
+Examples:
+
+ gic: interrupt-controller@2cf00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>, // GICD
+ <0x0 0x2f100000 0 0x200000>, // GICR
+ <0x0 0x2c000000 0 0x2000>, // GICC
+ <0x0 0x2c010000 0 0x2000>, // GICH
+ <0x0 0x2c020000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ msi-controller;
+ mbi-ranges = <256 128>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+ };
+
+ gic: interrupt-controller@2c010000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ redistributor-stride = <0x0 0x40000>; // 256kB stride
+ #redistributor-regions = <2>;
+ reg = <0x0 0x2c010000 0 0x10000>, // GICD
+ <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
+ <0x0 0x2e000000 0 0x800000>; // GICR 2: CPUs 32-63
+ <0x0 0x2c040000 0 0x2000>, // GICC
+ <0x0 0x2c060000 0 0x2000>, // GICH
+ <0x0 0x2c080000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+
+ gic-its@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c400000 0 0x20000>;
+ };
+
+ ppi-partitions {
+ part0: interrupt-partition-0 {
+ affinity = <&cpu0 &cpu2>;
+ };
+
+ part1: interrupt-partition-1 {
+ affinity = <&cpu1 &cpu3>;
+ };
+ };
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <1 9 4>;
+
+ msi_alias0: interrupt-controller@9bd0000 {
+ compatible = "qcom,gic-msi-aliases";
+ reg = <0x9bd0000 0x1000>;
+ msi-controller;
+ #msi-cells = <1>;
+ arm,spi-ranges = <544 96>;
+ };
+ };
+
+ device@0 {
+ reg = <0 0 0 4>;
+ interrupts = <1 1 4 &part0>;
+ };
diff --git a/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/thisimage b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/thisimage
new file mode 100644
index 0000000..ebfd81f
--- /dev/null
+++ b/rr-cache/2ae3d854ca8be3633d3b2594ca033a2c9ac35200/thisimage
@@ -0,0 +1,223 @@
+* ARM Generic Interrupt Controller, version 3
+
+AArch64 SMP cores are often associated with a GICv3, providing Private
+Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
+Software Generated Interrupts (SGI), and Locality-specific Peripheral
+Interrupts (LPI).
+
+Main node required properties:
+
+- compatible : should at least contain "arm,gic-v3".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. Must be a single cell with a value of at least 3.
+ If the system requires describing PPI affinity, then the value must
+ be at least 4.
+
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts. Other values are reserved for future use.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = edge triggered
+ 4 = level triggered
+
+ The 4th cell is a phandle to a node describing a set of CPUs this
+ interrupt is affine to. The interrupt must be a PPI, and the node
+ pointed must be a subnode of the "ppi-partitions" subnode. For
+ interrupt types other than PPI or PPIs that are not partitionned,
+ this cell must be zero. See the "ppi-partitions" node description
+ below.
+
+ Cells 5 and beyond are reserved for future use and must have a value
+ of 0 if present.
+
+- reg : Specifies base physical address(s) and size of the GIC
+ registers, in the following order:
+ - GIC Distributor interface (GICD)
+ - GIC Redistributors (GICR), one range per redistributor region
+ - GIC CPU interface (GICC)
+ - GIC Hypervisor interface (GICH)
+ - GIC Virtual CPU interface (GICV)
+
+ GICC, GICH and GICV are optional.
+
+- interrupts : Interrupt source of the VGIC maintenance interrupt.
+
+Optional
+
+- redistributor-stride : If using padding pages, specifies the stride
+ of consecutive redistributors. Must be a multiple of 64kB.
+
+- #redistributor-regions: The number of independent contiguous regions
+ occupied by the redistributors. Required if more than one such
+ region is present.
+
+<<<<<<<
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+
+- arm,spi-ranges : Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+=======
+- msi-controller: Boolean property. Identifies the node as an MSI
+ controller. Only present if the Message Based Interrupt
+ functionnality is being exposed by the HW, and the mbi-ranges
+ property present.
+
+- mbi-ranges: A list of pairs <intid span>, where "intid" is the first
+ SPI of a range that can be used an MBI, and "span" the size of that
+ range. Multiple ranges can be provided. Requires "msi-controller" to
+ be set.
+
+- mbi-alias: Address property. Base address of an alias of the GICD
+ region containing only the {SET,CLR}SPI registers to be used if
+ isolation is required, and if supported by the HW.
+>>>>>>>
+
+Sub-nodes:
+
+PPI affinity can be expressed as a single "ppi-partitions" node,
+containing a set of sub-nodes, each with the following property:
+- affinity: Should be a list of phandles to CPU nodes (as described in
+Documentation/devicetree/bindings/arm/cpus.txt).
+
+GICv3 has one or more Interrupt Translation Services (ITS) that are
+used to route Message Signalled Interrupts (MSI) to the CPUs.
+
+These nodes must have the following properties:
+- compatible : Should at least contain "arm,gic-v3-its".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the ITS
+ registers.
+
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
+The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS
+nodes.
+
+GICv3 has an IMPLEMENTATION DEFINED set of aliases for message-based interrupt
+requests.
+
+These nodes must have the following properties:
+- compatible : Should at least contain one of
+ "qcom,gic-msi-aliases".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
+- reg: Specifies the base physical address and size of the message-based
+ interrupt request registers.
+- arm,spi-ranges: Tuples of GIC SPI interrupt ranges (base, size) indicating
+ SPIs available for use as MSIs.
+
+Note: The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS and
+alias message-based interrupt nodes.
+
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
+Examples:
+
+ gic: interrupt-controller@2cf00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>, // GICD
+ <0x0 0x2f100000 0 0x200000>, // GICR
+ <0x0 0x2c000000 0 0x2000>, // GICC
+ <0x0 0x2c010000 0 0x2000>, // GICH
+ <0x0 0x2c020000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ msi-controller;
+ mbi-ranges = <256 128>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+ };
+
+ gic: interrupt-controller@2c010000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ redistributor-stride = <0x0 0x40000>; // 256kB stride
+ #redistributor-regions = <2>;
+ reg = <0x0 0x2c010000 0 0x10000>, // GICD
+ <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
+ <0x0 0x2e000000 0 0x800000>; // GICR 2: CPUs 32-63
+ <0x0 0x2c040000 0 0x2000>, // GICC
+ <0x0 0x2c060000 0 0x2000>, // GICH
+ <0x0 0x2c080000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c200000 0 0x20000>;
+ };
+
+ gic-its@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0x2c400000 0 0x20000>;
+ };
+
+ ppi-partitions {
+ part0: interrupt-partition-0 {
+ affinity = <&cpu0 &cpu2>;
+ };
+
+ part1: interrupt-partition-1 {
+ affinity = <&cpu1 &cpu3>;
+ };
+ };
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <1 9 4>;
+
+ msi_alias0: interrupt-controller@9bd0000 {
+ compatible = "qcom,gic-msi-aliases";
+ reg = <0x9bd0000 0x1000>;
+ msi-controller;
+ #msi-cells = <1>;
+ arm,spi-ranges = <544 96>;
+ };
+ };
+
+ device@0 {
+ reg = <0 0 0 4>;
+ interrupts = <1 1 4 &part0>;
+ };
diff --git a/rr-cache/2c69692917e377a3670a7f5157224e007a435365/postimage b/rr-cache/2c69692917e377a3670a7f5157224e007a435365/postimage
new file mode 100644
index 0000000..bc3d7c2
--- /dev/null
+++ b/rr-cache/2c69692917e377a3670a7f5157224e007a435365/postimage
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+		apps_rsc: rsc@179e0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ ufshc: ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ ufsphy: phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+			ufsphy_0: lane@1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+			ufsphy_1: lane@1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+ };
+};
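+
+/*
+ * The UFS controller and PHY above are left "disabled" and carry no
+ * supplies; a board .dts that includes this file is expected to enable
+ * them and add the regulators. A minimal sketch only; the regulator
+ * label below is an assumption, not taken from this file:
+ *
+ *	&ufshc {
+ *		status = "okay";
+ *		vcc-supply = <&vreg_l20a_2p95>;
+ *	};
+ *
+ *	&ufsphy {
+ *		status = "okay";
+ *	};
+ */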
diff --git a/rr-cache/2c69692917e377a3670a7f5157224e007a435365/preimage b/rr-cache/2c69692917e377a3670a7f5157224e007a435365/preimage
new file mode 100644
index 0000000..c99690b
--- /dev/null
+++ b/rr-cache/2c69692917e377a3670a7f5157224e007a435365/preimage
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+<<<<<<<
+=======
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+>>>>>>>
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+<<<<<<<
+=======
+
+ qup_i2c10_default: qup-i2c10-default {
+ pinmux {
+ function = "qup10";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_i2c10_sleep: qup-i2c10-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_uart2_default: qup-uart2-default {
+ pinmux {
+ function = "qup9";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ qup_uart2_sleep: qup-uart2-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+>>>>>>>
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+<<<<<<<
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+=======
+ phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufsphy_0: lane@0x1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+ ufsphy_1: lane@0x1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+
+ ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+ interrupts = <0 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+>>>>>>>
+ };
+ };
+};
diff --git a/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage b/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage
new file mode 100644
index 0000000..abf2650
--- /dev/null
+++ b/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+<<<<<<<
+=======
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+>>>>>>>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+<<<<<<<
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+=======
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ compatible = "qcom,cmd-db";
+>>>>>>>
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+<<<<<<<
+ clock-frequency = <19200000>;
+=======
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+>>>>>>>
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+<<<<<<<
+=======
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+>>>>>>>
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+<<<<<<<
+=======
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+>>>>>>>
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+<<<<<<<
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+=======
+
+ qup_i2c10_default: qup-i2c10-default {
+ pinmux {
+ function = "qup10";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_i2c10_sleep: qup-i2c10-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_uart2_default: qup-uart2-default {
+ pinmux {
+ function = "qup9";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ qup_uart2_sleep: qup-uart2-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+
+>>>>>>>
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+<<<<<<<
+=======
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0xac0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ uart2: serial@a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_uart2_default>;
+ pinctrl-1 = <&qup_uart2_sleep>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_i2c10_default>;
+ pinctrl-1 = <&qup_i2c10_sleep>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufsphy_0: lane@0x1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+ ufsphy_1: lane@0x1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+
+ ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+ interrupts = <0 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+>>>>>>>
+ };
+};
diff --git a/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage.1 b/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage.1
new file mode 100644
index 0000000..abf2650
--- /dev/null
+++ b/rr-cache/2e0fbc4890901254069ecb4576f1f83f3c3209e4/preimage.1
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+<<<<<<<
+=======
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+>>>>>>>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+<<<<<<<
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+=======
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ compatible = "qcom,cmd-db";
+>>>>>>>
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+<<<<<<<
+ clock-frequency = <19200000>;
+=======
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+>>>>>>>
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+<<<<<<<
+=======
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+>>>>>>>
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+<<<<<<<
+=======
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+>>>>>>>
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+<<<<<<<
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+=======
+
+ qup_i2c10_default: qup-i2c10-default {
+ pinmux {
+ function = "qup10";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_i2c10_sleep: qup-i2c10-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup_uart2_default: qup-uart2-default {
+ pinmux {
+ function = "qup9";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ qup_uart2_sleep: qup-uart2-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5";
+ };
+ };
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+
+>>>>>>>
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+<<<<<<<
+=======
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0xac0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ uart2: serial@a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_uart2_default>;
+ pinctrl-1 = <&qup_uart2_sleep>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_i2c10_default>;
+ pinctrl-1 = <&qup_i2c10_sleep>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufsphy_0: lane@0x1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+ ufsphy_1: lane@0x1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+
+ ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+ interrupts = <0 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+>>>>>>>
+ };
+};
diff --git a/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/postimage b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/postimage
new file mode 100644
index 0000000..12fd691
--- /dev/null
+++ b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/postimage
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
+obj-$(CONFIG_QCOM_APR) += apr.o
diff --git a/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/preimage b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/preimage
new file mode 100644
index 0000000..c6a7afb
--- /dev/null
+++ b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/preimage
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+<<<<<<<
+=======
+obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
+>>>>>>>
+obj-$(CONFIG_QCOM_APR) += apr.o
diff --git a/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/thisimage b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/thisimage
new file mode 100644
index 0000000..c6a7afb
--- /dev/null
+++ b/rr-cache/2fc1266133748afcc7d0159d2f427f62bc0fb28c/thisimage
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+<<<<<<<
+=======
+obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
+>>>>>>>
+obj-$(CONFIG_QCOM_APR) += apr.o
diff --git a/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/postimage b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/postimage
new file mode 100644
index 0000000..44c9479
--- /dev/null
+++ b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/postimage
@@ -0,0 +1,757 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=m
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_QCOM_CPR=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPMH=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_MSM_APCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+CONFIG_QTI_RPMH_MBOX=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_QCOM_GENI_SE=y
diff --git a/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/preimage b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/preimage
new file mode 100644
index 0000000..0ce3472
--- /dev/null
+++ b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/preimage
@@ -0,0 +1,801 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+<<<<<<<
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+=======
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+>>>>>>>
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+<<<<<<<
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+=======
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+>>>>>>>
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+<<<<<<<
+=======
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+>>>>>>>
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+<<<<<<<
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+=======
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+>>>>>>>
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+<<<<<<<
+CONFIG_ATL1C=m
+=======
+CONFIG_ATL1C=y
+>>>>>>>
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+<<<<<<<
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+=======
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+>>>>>>>
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+<<<<<<<
+CONFIG_PINCTRL_MT7622=y
+=======
+CONFIG_PINCTRL_SDM845=y
+>>>>>>>
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_QCOM_CPR=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPMH=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+<<<<<<<
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+=======
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_SND_SIMPLE_CARD=y
+>>>>>>>
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_MSM_APCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+<<<<<<<
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+=======
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+>>>>>>>
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+CONFIG_QTI_RPMH_MBOX=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_QCOM_GENI_SE=y
diff --git a/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/thisimage b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/thisimage
new file mode 100644
index 0000000..0ce3472
--- /dev/null
+++ b/rr-cache/3152d54bb8498f5b3a678c7d7e9641f795736fa8/thisimage
@@ -0,0 +1,801 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+<<<<<<<
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+=======
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+>>>>>>>
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+<<<<<<<
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+=======
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+>>>>>>>
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+<<<<<<<
+=======
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+>>>>>>>
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+<<<<<<<
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+=======
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+>>>>>>>
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+<<<<<<<
+CONFIG_ATL1C=m
+=======
+CONFIG_ATL1C=y
+>>>>>>>
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+<<<<<<<
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+=======
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+>>>>>>>
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+<<<<<<<
+CONFIG_PINCTRL_MT7622=y
+=======
+CONFIG_PINCTRL_SDM845=y
+>>>>>>>
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_QCOM_CPR=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPMH=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+<<<<<<<
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+=======
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_SND_SIMPLE_CARD=y
+>>>>>>>
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_MSM_APCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+<<<<<<<
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+=======
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+>>>>>>>
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+CONFIG_QTI_RPMH_MBOX=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_QCOM_GENI_SE=y
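The <<<<<<< / ======= / >>>>>>> blocks in the two defconfig images above are the normalized conflict hunks that git rerere stores under rr-cache: the preimage captures the conflict exactly as it appeared, and a matching postimage (when present) lets git replay the same resolution the next time the identical conflict recurs. Purely as an illustration — not the resolution recorded in this cache — the CONFIG_SCSI_UFSHCD hunk could plausibly be settled by taking the built-in side, assuming UFS is wanted for boot storage:

    CONFIG_SCSI_UFSHCD=y
    CONFIG_SCSI_UFSHCD_PLATFORM=y
    CONFIG_SCSI_UFS_QCOM=y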
diff --git a/rr-cache/321216f1938295007a308be9906b0a7e68880687/postimage b/rr-cache/321216f1938295007a308be9906b0a7e68880687/postimage
new file mode 100644
index 0000000..1ec00ff
--- /dev/null
+++ b/rr-cache/321216f1938295007a308be9906b0a7e68880687/postimage
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __Q6DSP_ERR_NO_H__
+#define __Q6DSP_ERR_NO_H__
+#include <linux/kernel.h>
+
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK 0x00000000
+/* General failure. */
+#define ADSP_EFAILED 0x00000001
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM 0x00000002
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define ADSP_EVERSION 0x00000004
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED 0x00000005
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC 0x00000006
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE 0x00000007
+/* Invalid handle. */
+#define ADSP_EHANDLE 0x00000008
+/* Operation is already processed. */
+#define ADSP_EALREADY 0x00000009
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY 0x0000000A
+/* Operation is pending completion. */
+#define ADSP_EPENDING 0x0000000B
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY 0x0000000C
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED 0x0000000D
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED 0x0000000E
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE 0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE 0x00000010
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL 0x00000011
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE 0x00000012
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY 0x00000014
+/* Item does not exist. */
+#define ADSP_ENOTEXIST 0x00000015
+/* Max count for adsp error code sent to HLOS*/
+
+#endif /*__Q6DSP_ERR_NO_H__ */
diff --git a/rr-cache/321216f1938295007a308be9906b0a7e68880687/preimage b/rr-cache/321216f1938295007a308be9906b0a7e68880687/preimage
new file mode 100644
index 0000000..3395cb2
--- /dev/null
+++ b/rr-cache/321216f1938295007a308be9906b0a7e68880687/preimage
@@ -0,0 +1,104 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6DSP_ERR_NO_H__
+#define __Q6DSP_ERR_NO_H__
+#include <linux/kernel.h>
+
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK 0x00000000
+/* General failure. */
+#define ADSP_EFAILED 0x00000001
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM 0x00000002
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define ADSP_EVERSION 0x00000004
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED 0x00000005
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC 0x00000006
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE 0x00000007
+/* Invalid handle. */
+#define ADSP_EHANDLE 0x00000008
+/* Operation is already processed. */
+#define ADSP_EALREADY 0x00000009
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY 0x0000000A
+/* Operation is pending completion. */
+#define ADSP_EPENDING 0x0000000B
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY 0x0000000C
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED 0x0000000D
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED 0x0000000E
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE 0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE 0x00000010
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL 0x00000011
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE 0x00000012
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY 0x00000014
+/* Item does not exist. */
+#define ADSP_ENOTEXIST 0x00000015
+/* Max count for adsp error code sent to HLOS*/
+
+<<<<<<<
+=======
+struct q6dsp_err_code {
+ int lnx_err_code;
+ char *adsp_err_str;
+};
+
+static struct q6dsp_err_code q6dsp_err_codes[] = {
+ [ADSP_EFAILED] = { -ENOTRECOVERABLE, "ADSP_EFAILED"},
+ [ADSP_EBADPARAM] = { -EINVAL, "ADSP_EBADPARAM"},
+ [ADSP_EUNSUPPORTED] = { -ENOSYS, "ADSP_EUNSUPPORTED"},
+ [ADSP_EVERSION] = { -ENOPROTOOPT, "ADSP_EVERSION"},
+ [ADSP_EUNEXPECTED] = { -ENOTRECOVERABLE, "ADSP_EUNEXPECTED"},
+ [ADSP_EPANIC] = { -ENOTRECOVERABLE, "ADSP_EPANIC"},
+ [ADSP_ENORESOURCE] = { -ENOSPC, "ADSP_ENORESOURCE"},
+ [ADSP_EHANDLE] = { -EBADR, "ADSP_EHANDLE"},
+ [ADSP_EALREADY] = { -EALREADY, "ADSP_EALREADY"},
+ [ADSP_ENOTREADY] = { -EPERM, "ADSP_ENOTREADY"},
+ [ADSP_EPENDING] = { -EINPROGRESS, "ADSP_EPENDING"},
+ [ADSP_EBUSY] = { -EBUSY, "ADSP_EBUSY"},
+ [ADSP_EABORTED] = { -ECANCELED, "ADSP_EABORTED"},
+ [ADSP_EPREEMPTED] = { -EAGAIN, "ADSP_EPREEMPTED"},
+ [ADSP_ECONTINUE] = { -EAGAIN, "ADSP_ECONTINUE"},
+ [ADSP_EIMMEDIATE] = { -EAGAIN, "ADSP_EIMMEDIATE"},
+ [ADSP_ENOTIMPL] = { -EAGAIN, "ADSP_ENOTIMPL"},
+ [ADSP_ENEEDMORE] = { -ENODATA, "ADSP_ENEEDMORE"},
+ [ADSP_ENOMEMORY] = { -EINVAL, "ADSP_ENOMEMORY"},
+ [ADSP_ENOTEXIST] = { -ENOENT, "ADSP_ENOTEXIST"},
+};
+
+static inline int q6dsp_errno(u32 error)
+{
+ int ret = -EINVAL;
+
+ if (error <= ARRAY_SIZE(q6dsp_err_codes))
+ ret = q6dsp_err_codes[error].lnx_err_code;
+
+ return ret;
+}
+
+static inline char *q6dsp_strerror(u32 error)
+{
+ if (error <= ARRAY_SIZE(q6dsp_err_codes))
+ return q6dsp_err_codes[error].adsp_err_str;
+
+ return "ADSP_ERR_MAX";
+}
+
+>>>>>>>
+#endif /*__Q6DSP_ERR_NO_H__ */
diff --git a/rr-cache/321216f1938295007a308be9906b0a7e68880687/thisimage b/rr-cache/321216f1938295007a308be9906b0a7e68880687/thisimage
new file mode 100644
index 0000000..3395cb2
--- /dev/null
+++ b/rr-cache/321216f1938295007a308be9906b0a7e68880687/thisimage
@@ -0,0 +1,104 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6DSP_ERR_NO_H__
+#define __Q6DSP_ERR_NO_H__
+#include <linux/kernel.h>
+
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK 0x00000000
+/* General failure. */
+#define ADSP_EFAILED 0x00000001
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM 0x00000002
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define ADSP_EVERSION 0x00000004
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED 0x00000005
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC 0x00000006
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE 0x00000007
+/* Invalid handle. */
+#define ADSP_EHANDLE 0x00000008
+/* Operation is already processed. */
+#define ADSP_EALREADY 0x00000009
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY 0x0000000A
+/* Operation is pending completion. */
+#define ADSP_EPENDING 0x0000000B
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY 0x0000000C
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED 0x0000000D
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED 0x0000000E
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE 0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE 0x00000010
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL 0x00000011
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE 0x00000012
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY 0x00000014
+/* Item does not exist. */
+#define ADSP_ENOTEXIST 0x00000015
+/* Max count for adsp error code sent to HLOS*/
+
+<<<<<<<
+=======
+struct q6dsp_err_code {
+ int lnx_err_code;
+ char *adsp_err_str;
+};
+
+static struct q6dsp_err_code q6dsp_err_codes[] = {
+ [ADSP_EFAILED] = { -ENOTRECOVERABLE, "ADSP_EFAILED"},
+ [ADSP_EBADPARAM] = { -EINVAL, "ADSP_EBADPARAM"},
+ [ADSP_EUNSUPPORTED] = { -ENOSYS, "ADSP_EUNSUPPORTED"},
+ [ADSP_EVERSION] = { -ENOPROTOOPT, "ADSP_EVERSION"},
+ [ADSP_EUNEXPECTED] = { -ENOTRECOVERABLE, "ADSP_EUNEXPECTED"},
+ [ADSP_EPANIC] = { -ENOTRECOVERABLE, "ADSP_EPANIC"},
+ [ADSP_ENORESOURCE] = { -ENOSPC, "ADSP_ENORESOURCE"},
+ [ADSP_EHANDLE] = { -EBADR, "ADSP_EHANDLE"},
+ [ADSP_EALREADY] = { -EALREADY, "ADSP_EALREADY"},
+ [ADSP_ENOTREADY] = { -EPERM, "ADSP_ENOTREADY"},
+ [ADSP_EPENDING] = { -EINPROGRESS, "ADSP_EPENDING"},
+ [ADSP_EBUSY] = { -EBUSY, "ADSP_EBUSY"},
+ [ADSP_EABORTED] = { -ECANCELED, "ADSP_EABORTED"},
+ [ADSP_EPREEMPTED] = { -EAGAIN, "ADSP_EPREEMPTED"},
+ [ADSP_ECONTINUE] = { -EAGAIN, "ADSP_ECONTINUE"},
+ [ADSP_EIMMEDIATE] = { -EAGAIN, "ADSP_EIMMEDIATE"},
+ [ADSP_ENOTIMPL] = { -EAGAIN, "ADSP_ENOTIMPL"},
+ [ADSP_ENEEDMORE] = { -ENODATA, "ADSP_ENEEDMORE"},
+ [ADSP_ENOMEMORY] = { -EINVAL, "ADSP_ENOMEMORY"},
+ [ADSP_ENOTEXIST] = { -ENOENT, "ADSP_ENOTEXIST"},
+};
+
+static inline int q6dsp_errno(u32 error)
+{
+ int ret = -EINVAL;
+
+ if (error <= ARRAY_SIZE(q6dsp_err_codes))
+ ret = q6dsp_err_codes[error].lnx_err_code;
+
+ return ret;
+}
+
+static inline char *q6dsp_strerror(u32 error)
+{
+ if (error <= ARRAY_SIZE(q6dsp_err_codes))
+ return q6dsp_err_codes[error].adsp_err_str;
+
+ return "ADSP_ERR_MAX";
+}
+
+>>>>>>>
+#endif /*__Q6DSP_ERR_NO_H__ */
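In the conflicted header above, the side below ======= adds q6dsp_err_codes[] together with q6dsp_errno() and q6dsp_strerror(), mapping ADSP status words onto Linux errno values and printable names. The fragment below is a minimal caller sketch, not code from the cache: handle_dsp_reply() and its arguments are hypothetical. Note that the cached bounds check uses error <= ARRAY_SIZE(q6dsp_err_codes), which admits an index one past the end of the table; the strict < comparison would be the safe form.

#include <linux/device.h>
#include <linux/kernel.h>
/* "q6dsp-errno.h" is assumed to be the header captured above. */

static int handle_dsp_reply(struct device *dev, u32 adsp_status)
{
	int err;

	/* ADSP_EOK means the DSP accepted and completed the command. */
	if (adsp_status == ADSP_EOK)
		return 0;

	/* Translate the firmware status into an ordinary kernel errno. */
	err = q6dsp_errno(adsp_status);
	dev_err(dev, "DSP command failed: %s (%d)\n",
		q6dsp_strerror(adsp_status), err);

	return err;
}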
diff --git a/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/postimage b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/postimage
new file mode 100644
index 0000000..333ed4a
--- /dev/null
+++ b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/postimage
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ struct regmap *regmap;
+ unsigned long offset;
+ struct platform_device *clk;
+};
+
+static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
+ return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct qcom_apcs_ipc *apcs;
+ struct regmap *regmap;
+ struct resource *res;
+ unsigned long offset;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ apcs->regmap = regmap;
+ apcs->offset = offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = mbox_controller_register(&apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ "qcom-apcs-msm8916-clk",
+ -1, NULL, 0);
+ if (IS_ERR(apcs->clk))
+ dev_err(&pdev->dev, "failed to register APCS clk\n");
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+ struct platform_device *clk = apcs->clk;
+
+ mbox_controller_unregister(&apcs->mbox);
+ platform_device_unregister(clk);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
+ { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
diff --git a/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/preimage b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/preimage
new file mode 100644
index 0000000..a4e409d
--- /dev/null
+++ b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/preimage
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ struct regmap *regmap;
+ unsigned long offset;
+ struct platform_device *clk;
+};
+
+static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
+ return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct qcom_apcs_ipc *apcs;
+ struct regmap *regmap;
+ struct resource *res;
+ unsigned long offset;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ apcs->regmap = regmap;
+ apcs->offset = offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = mbox_controller_register(&apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ "qcom-apcs-msm8916-clk",
+ -1, NULL, 0);
+ if (IS_ERR(apcs->clk))
+ dev_err(&pdev->dev, "failed to register APCS clk\n");
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+ struct platform_device *clk = apcs->clk;
+
+ mbox_controller_unregister(&apcs->mbox);
+ platform_device_unregister(clk);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+<<<<<<<
+=======
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
+>>>>>>>
+ { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
diff --git a/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/thisimage b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/thisimage
new file mode 100644
index 0000000..a4e409d
--- /dev/null
+++ b/rr-cache/35d34cc98831cadf6f64867b703a11ec02db6365/thisimage
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ struct regmap *regmap;
+ unsigned long offset;
+ struct platform_device *clk;
+};
+
+static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
+ return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct qcom_apcs_ipc *apcs;
+ struct regmap *regmap;
+ struct resource *res;
+ unsigned long offset;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ apcs->regmap = regmap;
+ apcs->offset = offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = mbox_controller_register(&apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ "qcom-apcs-msm8916-clk",
+ -1, NULL, 0);
+ if (IS_ERR(apcs->clk))
+ dev_err(&pdev->dev, "failed to register APCS clk\n");
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+ struct platform_device *clk = apcs->clk;
+
+ mbox_controller_unregister(&apcs->mbox);
+ platform_device_unregister(clk);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+<<<<<<<
+=======
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
+>>>>>>>
+ { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
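The driver above exposes 32 doorbell channels through the generic mailbox
framework. As a hedged illustration of the consumer side (not part of this
commit), the sketch below requests channel 0 and rings it using only calls
from linux/mailbox_client.h; the device pointer and the channel index 0 are
assumed placeholders for a real consumer with an "mboxes" phandle in its node.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	static struct mbox_client example_cl;
	static struct mbox_chan *example_chan;

	static int example_request_doorbell(struct device *dev)
	{
		example_cl.dev = dev;		/* consumer with an "mboxes" property */
		example_cl.knows_txdone = true;	/* controller has no tx-done signal */

		example_chan = mbox_request_channel(&example_cl, 0);
		return PTR_ERR_OR_ZERO(example_chan);
	}

	static void example_kick_remote(void)
	{
		/* The APCS send_data op ignores the message pointer, so NULL is fine */
		mbox_send_message(example_chan, NULL);
		mbox_client_txdone(example_chan, 0);
	}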
diff --git a/rr-cache/3b5769cf96e6c1ad9c66e9a9eca1adef3f180dde/preimage b/rr-cache/3b5769cf96e6c1ad9c66e9a9eca1adef3f180dde/preimage
new file mode 100644
index 0000000..f6ba264
--- /dev/null
+++ b/rr-cache/3b5769cf96e6c1ad9c66e9a9eca1adef3f180dde/preimage
@@ -0,0 +1,43 @@
+Command DB
+---------
+
+Command DB is a database that provides a mapping between a resource key and
+the address of a system resource managed by a remote processor. The data is
+stored in a shared memory region and is loaded by the remote processor.
+
+Some Qualcomm Technologies Inc SoCs have hardware accelerators for controlling
+shared resources. Depending on the board configuration, the shared resource
+properties may change. These properties are dynamically probed by the remote
+processor and made available in the shared memory.
+
+The binding for Command DB is specified in the reserved-memory section of the
+devicetree. The devicetree representation of the command DB driver should be:
+
+Properties:
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,cmd-db"
+
+- reg:
+ Usage: required
+ Value type: <prop encoded array>
+ Definition: The register address that points to the actual location of
+ the Command DB in memory.
+
+Example:
+
+ reserved-memory {
+ [...]
+<<<<<<<
+ qcom,cmd-db@85fe0000 {
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ compatible = "qcom,cmd-db";
+=======
+ reserved-memory@85fe0000 {
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ compatible = "qcom,cmd-db";
+ no-map;
+>>>>>>>
+ };
+ };
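Once a region like the one above has been parsed by the kernel, consumers
resolve keys against it at runtime. The sketch below is illustrative only and
assumes the in-kernel cmd-db helpers cmd_db_ready() and cmd_db_read_addr();
the key "example_rsrc" is a placeholder, not a real resource name.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <soc/qcom/cmd-db.h>

	static int example_lookup_resource(u32 *addr)
	{
		int ret;

		ret = cmd_db_ready();	/* -EPROBE_DEFER until the region is parsed */
		if (ret)
			return ret;

		*addr = cmd_db_read_addr("example_rsrc");	/* key -> address */
		if (!*addr)
			return -ENODEV;		/* key not found in the database */

		return 0;
	}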
diff --git a/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/postimage.1 b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/postimage.1
new file mode 100644
index 0000000..5ab216f
--- /dev/null
+++ b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/postimage.1
@@ -0,0 +1,123 @@
+Qualcomm QMP PHY controller
+===========================
+
+QMP phy controller supports physical layer functionality for a number of
+controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+Required properties:
+ - compatible: compatible list, contains:
+ "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
+ "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
+ "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
+ "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+ "qcom,sdm845-qmp-ufs-phy" for UFS PHY on SDM845
+
+ - reg: offset and length of register set for PHY's common serdes block.
+
+ - #clock-cells: must be 1
+    - Phy pll outputs a number of clocks for Tx, Rx and Pipe
+      interface (for pipe based PHYs). These clocks are then gate-controlled
+      by gcc.
+ - #address-cells: must be 1
+ - #size-cells: must be 1
+ - ranges: must be present
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: "cfg_ahb" for phy config clock,
+ "aux" for phy aux clock,
+ "ref" for 19.2 MHz ref clk,
+ "com_aux" for phy common block aux clock,
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8996-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,qmp-v3-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref", "com_aux".
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+ "cfg" for phy's ahb cfg block reset (Optional).
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "phy", "common", "cfg".
+ For "qcom,msm8996-qmp-usb3-phy" must contain
+ "phy", "common".
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+Optional properties:
+ - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
+ pll block.
+
+Required nodes:
+ - Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+Required properties for child node:
+ - reg: list of offset and length pairs of register sets for PHY blocks -
+ tx, rx and pcs.
+
+ - #phy-cells: must be 0
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: Must contain following for pcie and usb qmp phys:
+ "pipe<lane-number>" for pipe clock specific to each lane.
+ - clock-output-names: Name of the PHY clock that will be the parent for
+ the above pipe clock.
+
+ For "qcom,ipq8074-qmp-pcie-phy":
+ - "pcie20_phy0_pipe_clk" Pipe Clock parent
+ (or)
+ "pcie20_phy1_pipe_clk"
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: Must contain following for pcie qmp phys:
+ "lane<lane-number>" for reset specific to each lane.
+
+Example:
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+
+ pciephy_0: lane@35000 {
+ reg = <0x35000 0x130>,
+ <0x35200 0x200>,
+ <0x35400 0x1dc>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "pcie_0_pipe_clk_src";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ ...
+ ...
+ };
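The lane child nodes above are what PHY consumers point at through their
"phys"/"phy-names" properties. A hedged sketch of that consumer side, using
only the generic PHY framework from linux/phy/phy.h; the "pciephy" name is an
assumed example and is not mandated by this binding.

	#include <linux/err.h>
	#include <linux/phy/phy.h>

	static int example_enable_lane(struct device *dev)
	{
		struct phy *phy;
		int ret;

		/* Resolved via phys/phy-names in the consumer's own DT node */
		phy = devm_phy_get(dev, "pciephy");
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		ret = phy_init(phy);		/* initialise the PHY lane */
		if (ret)
			return ret;

		ret = phy_power_on(phy);	/* power it on, start the PLL */
		if (ret)
			phy_exit(phy);

		return ret;
	}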
diff --git a/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage
new file mode 100644
index 0000000..7fe45ec
--- /dev/null
+++ b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage
@@ -0,0 +1,127 @@
+Qualcomm QMP PHY controller
+===========================
+
+QMP phy controller supports physical layer functionality for a number of
+controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+Required properties:
+ - compatible: compatible list, contains:
+ "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
+ "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
+ "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
+<<<<<<<
+ "qcom,qmp-v3-usb3-phy" for USB3 QMP V3 phy.
+ "qcom,sdm845-qmp-ufs-phy" for UFS PHY on SDM845
+=======
+ "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+>>>>>>>
+
+ - reg: offset and length of register set for PHY's common serdes block.
+
+ - #clock-cells: must be 1
+    - Phy pll outputs a number of clocks for Tx, Rx and Pipe
+      interface (for pipe based PHYs). These clocks are then gate-controlled
+      by gcc.
+ - #address-cells: must be 1
+ - #size-cells: must be 1
+ - ranges: must be present
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: "cfg_ahb" for phy config clock,
+ "aux" for phy aux clock,
+ "ref" for 19.2 MHz ref clk,
+ "com_aux" for phy common block aux clock,
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8996-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,qmp-v3-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref", "com_aux".
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+ "cfg" for phy's ahb cfg block reset (Optional).
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "phy", "common", "cfg".
+ For "qcom,msm8996-qmp-usb3-phy" must contain
+ "phy", "common".
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+Optional properties:
+ - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
+ pll block.
+
+Required nodes:
+ - Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+Required properties for child node:
+ - reg: list of offset and length pairs of register sets for PHY blocks -
+ tx, rx and pcs.
+
+ - #phy-cells: must be 0
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: Must contain following for pcie and usb qmp phys:
+ "pipe<lane-number>" for pipe clock specific to each lane.
+ - clock-output-names: Name of the PHY clock that will be the parent for
+ the above pipe clock.
+
+ For "qcom,ipq8074-qmp-pcie-phy":
+ - "pcie20_phy0_pipe_clk" Pipe Clock parent
+ (or)
+ "pcie20_phy1_pipe_clk"
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: Must contain following for pcie qmp phys:
+ "lane<lane-number>" for reset specific to each lane.
+
+Example:
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+
+ pciephy_0: lane@35000 {
+ reg = <0x35000 0x130>,
+ <0x35200 0x200>,
+ <0x35400 0x1dc>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "pcie_0_pipe_clk_src";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ ...
+ ...
+ };
diff --git a/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage.1 b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage.1
new file mode 100644
index 0000000..7fe45ec
--- /dev/null
+++ b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/preimage.1
@@ -0,0 +1,127 @@
+Qualcomm QMP PHY controller
+===========================
+
+QMP phy controller supports physical layer functionality for a number of
+controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+Required properties:
+ - compatible: compatible list, contains:
+ "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
+ "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
+ "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
+<<<<<<<
+ "qcom,qmp-v3-usb3-phy" for USB3 QMP V3 phy.
+ "qcom,sdm845-qmp-ufs-phy" for UFS PHY on SDM845
+=======
+ "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+>>>>>>>
+
+ - reg: offset and length of register set for PHY's common serdes block.
+
+ - #clock-cells: must be 1
+    - Phy pll outputs a number of clocks for Tx, Rx and Pipe
+      interface (for pipe based PHYs). These clocks are then gate-controlled
+      by gcc.
+ - #address-cells: must be 1
+ - #size-cells: must be 1
+ - ranges: must be present
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: "cfg_ahb" for phy config clock,
+ "aux" for phy aux clock,
+ "ref" for 19.2 MHz ref clk,
+ "com_aux" for phy common block aux clock,
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8996-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,qmp-v3-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref", "com_aux".
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+ "cfg" for phy's ahb cfg block reset (Optional).
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "phy", "common", "cfg".
+ For "qcom,msm8996-qmp-usb3-phy" must contain
+ "phy", "common".
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+Optional properties:
+ - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
+ pll block.
+
+Required nodes:
+ - Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+Required properties for child node:
+ - reg: list of offset and length pairs of register sets for PHY blocks -
+ tx, rx and pcs.
+
+ - #phy-cells: must be 0
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: Must contain following for pcie and usb qmp phys:
+ "pipe<lane-number>" for pipe clock specific to each lane.
+ - clock-output-names: Name of the PHY clock that will be the parent for
+ the above pipe clock.
+
+ For "qcom,ipq8074-qmp-pcie-phy":
+ - "pcie20_phy0_pipe_clk" Pipe Clock parent
+ (or)
+ "pcie20_phy1_pipe_clk"
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: Must contain following for pcie qmp phys:
+ "lane<lane-number>" for reset specific to each lane.
+
+Example:
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+
+ pciephy_0: lane@35000 {
+ reg = <0x35000 0x130>,
+ <0x35200 0x200>,
+ <0x35400 0x1dc>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "pcie_0_pipe_clk_src";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ ...
+ ...
+ };
diff --git a/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/thisimage.1 b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/thisimage.1
new file mode 100644
index 0000000..7fe45ec
--- /dev/null
+++ b/rr-cache/3b5da2a9450cb2ebd4e2f88f94085248db799a64/thisimage.1
@@ -0,0 +1,127 @@
+Qualcomm QMP PHY controller
+===========================
+
+QMP phy controller supports physical layer functionality for a number of
+controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+Required properties:
+ - compatible: compatible list, contains:
+ "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
+ "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
+ "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
+<<<<<<<
+ "qcom,qmp-v3-usb3-phy" for USB3 QMP V3 phy.
+ "qcom,sdm845-qmp-ufs-phy" for UFS PHY on SDM845
+=======
+ "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+>>>>>>>
+
+ - reg: offset and length of register set for PHY's common serdes block.
+
+ - #clock-cells: must be 1
+    - Phy pll outputs a number of clocks for Tx, Rx and Pipe
+      interface (for pipe based PHYs). These clocks are then gate-controlled
+      by gcc.
+ - #address-cells: must be 1
+ - #size-cells: must be 1
+ - ranges: must be present
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: "cfg_ahb" for phy config clock,
+ "aux" for phy aux clock,
+ "ref" for 19.2 MHz ref clk,
+ "com_aux" for phy common block aux clock,
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8996-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,qmp-v3-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref", "com_aux".
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+ "cfg" for phy's ahb cfg block reset (Optional).
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "phy", "common", "cfg".
+ For "qcom,msm8996-qmp-usb3-phy" must contain
+ "phy", "common".
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+Optional properties:
+ - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
+ pll block.
+
+Required nodes:
+ - Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+Required properties for child node:
+ - reg: list of offset and length pairs of register sets for PHY blocks -
+ tx, rx and pcs.
+
+ - #phy-cells: must be 0
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: Must contain following for pcie and usb qmp phys:
+ "pipe<lane-number>" for pipe clock specific to each lane.
+ - clock-output-names: Name of the PHY clock that will be the parent for
+ the above pipe clock.
+
+ For "qcom,ipq8074-qmp-pcie-phy":
+ - "pcie20_phy0_pipe_clk" Pipe Clock parent
+ (or)
+ "pcie20_phy1_pipe_clk"
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: Must contain following for pcie qmp phys:
+ "lane<lane-number>" for reset specific to each lane.
+
+Example:
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+
+ pciephy_0: lane@35000 {
+ reg = <0x35000 0x130>,
+ <0x35200 0x200>,
+ <0x35400 0x1dc>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "pcie_0_pipe_clk_src";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ ...
+ ...
+ };
diff --git a/rr-cache/3b8d5ca2ef10d1975b8a81563bf7cbcead9923d8/preimage b/rr-cache/3b8d5ca2ef10d1975b8a81563bf7cbcead9923d8/preimage
new file mode 100644
index 0000000..e66e333
--- /dev/null
+++ b/rr-cache/3b8d5ca2ef10d1975b8a81563bf7cbcead9923d8/preimage
@@ -0,0 +1,1039 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+#include "common.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ goto err;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg)
+ return i;
+
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = clk_hw_get_name(hw);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ * parent_rate m
+ * rate = ----------- x ---
+ * hid_div n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+}
+
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg, mask;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + M_REG, mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ return clk_rcg2_configure(rcg, f);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ { 52, 295 }, /* 119 M */
+ { 11, 57 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 50 }, /* 148.50 M */
+ { 47, 206 }, /* 154 M */
+ { 31, 100 }, /* 205.25 M */
+ { 107, 269 }, /* 268.50 M */
+ { },
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ { 31, 211 }, /* 119 M */
+ { 32, 199 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 60 }, /* 148.50 M */
+ { 50, 263 }, /* 154 M */
+ { 31, 120 }, /* 205.25 M */
+ { 119, 359 }, /* 268.50 M */
+ { },
+};
+
+static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 src_rate = parent_rate;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_edp_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ /* Force the correct parent */
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ if (req->best_parent_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = req->rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ hid_div >>= CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+const struct clk_ops clk_edp_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_edp_pixel_set_rate,
+ .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
+ .determine_rate = clk_edp_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
+
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ unsigned long div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_byte_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte_set_rate,
+ .set_rate_and_parent = clk_byte_set_rate_and_parent,
+ .determine_rate = clk_byte_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte_ops);
+
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
+static const struct frac_entry frac_table_pixel[] = {
+ { 3, 8 },
+ { 2, 9 },
+ { 4, 9 },
+ { 1, 1 },
+ { }
+};
+
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long request, src_rate;
+ int delta = 100000;
+ const struct frac_entry *frac = frac_table_pixel;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ const struct frac_entry *frac = frac_table_pixel;
+ unsigned long request;
+ int delta = 100000;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ return clk_pixel_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_pixel_set_rate,
+ .set_rate_and_parent = clk_pixel_set_rate_and_parent,
+ .determine_rate = clk_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+<<<<<<<
+=======
+ /* Force enable bit */
+>>>>>>>
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+<<<<<<<
+ /* Delay for 1usec and retry polling the status bit */
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+=======
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+>>>>>>>
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+<<<<<<<
+=======
+ /* Clear force enable bit */
+>>>>>>>
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+<<<<<<<
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+=======
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, unsigned long rate)
+{
+>>>>>>>
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
+=======
+ ret = clk_rcg2_configure(rcg, f);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * In case clock is disabled, update the CFG, M, N and D registers
+ * and don't hit the update bit of CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+=======
+ int ret;
+
+ /*
+ * Return if the RCG is currently disabled. This configuration
+ * update will happen as part of the RCG enable sequence.
+ */
+ if (!__clk_is_enabled(hw->clk)) {
+ rcg->current_freq = rate;
+ return 0;
+ }
+
+ ret = clk_rcg2_shared_force_enable_clear(hw, rate);
+ if (ret)
+ return ret;
+
+ /* Update current frequency with the requested frequency. */
+ rcg->current_freq = rate;
+
+ return ret;
+>>>>>>>
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+<<<<<<<
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because required configuration has already
+ * been written in clk_rcg2_shared_set_rate()
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+=======
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!__clk_is_enabled(hw->clk)) {
+ if (!rcg->current_freq) {
+ if (!clk_rcg2_get_parent(hw))
+ rcg->current_freq =
+ rcg->safe_src_freq_tbl->freq;
+ else
+ rcg->current_freq =
+ clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+
+ return rcg->current_freq;
+ }
+
+ return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->current_freq == rcg->safe_src_freq_tbl->freq) {
+ clk_rcg2_set_force_enable(hw);
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+
+ return 0;
+ }
+
+ /*
+ * Switch from safe source to the stashed mux selection. The current
+ * parent has already been prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its source.
+ */
+
+ return clk_rcg2_shared_force_enable_clear(hw, rcg->current_freq);
+>>>>>>>
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ u32 cfg;
+
+ /*
+ * Store current configuration as switching to safe source would clear
+ * the SRC and DIV of CFG register
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of safe source.
+=======
+
+ /*
+ * Park the RCG at a safe configuration - sourced off from safe source.
+>>>>>>>
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+<<<<<<<
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+=======
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+>>>>>>>
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+<<<<<<<
+ .recalc_rate = clk_rcg2_recalc_rate,
+=======
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+>>>>>>>
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
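As a worked example of the m/n:d formula implemented by calc_rate() above,
assume (these register values are illustrative, not taken from any table in
this commit) a 19.2 MHz parent, a CFG divider field of 3 - an effective
divide-by-(3+1)/2 = 2 - and M/N of 1/2 in dual-edge mode.

	/* Mirrors calc_rate() for the assumed values above */
	static unsigned long example_rcg_rate(void)
	{
		unsigned long rate = 19200000;		/* parent_rate */
		unsigned int hid_div = 3, m = 1, n = 2;

		rate = rate * 2 / (hid_div + 1);	/* 19.2 MHz -> 9.6 MHz */
		rate = rate * m / n;			/* 9.6 MHz -> 4.8 MHz */

		return rate;				/* 4800000 */
	}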
diff --git a/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage
new file mode 100644
index 0000000..c41530d
--- /dev/null
+++ b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+<<<<<<<
+=======
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+>>>>>>>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+#include "common.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ goto err;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg)
+ return i;
+
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = clk_hw_get_name(hw);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ * parent_rate m
+ * rate = ----------- x ---
+ * hid_div n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+}
+
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg, mask;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + M_REG, mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ return clk_rcg2_configure(rcg, f);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ { 52, 295 }, /* 119 M */
+ { 11, 57 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 50 }, /* 148.50 M */
+ { 47, 206 }, /* 154 M */
+ { 31, 100 }, /* 205.25 M */
+ { 107, 269 }, /* 268.50 M */
+ { },
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ { 31, 211 }, /* 119 M */
+ { 32, 199 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 60 }, /* 148.50 M */
+ { 50, 263 }, /* 154 M */
+ { 31, 120 }, /* 205.25 M */
+ { 119, 359 }, /* 268.50 M */
+ { },
+};
+
+static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 src_rate = parent_rate;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_edp_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ /* Force the correct parent */
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ if (req->best_parent_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = req->rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ hid_div >>= CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+const struct clk_ops clk_edp_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_edp_pixel_set_rate,
+ .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
+ .determine_rate = clk_edp_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
+
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ unsigned long div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_byte_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte_set_rate,
+ .set_rate_and_parent = clk_byte_set_rate_and_parent,
+ .determine_rate = clk_byte_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte_ops);
+
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
+static const struct frac_entry frac_table_pixel[] = {
+ { 3, 8 },
+ { 2, 9 },
+ { 4, 9 },
+ { 1, 1 },
+ { }
+};
+
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long request, src_rate;
+ int delta = 100000;
+ const struct frac_entry *frac = frac_table_pixel;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ const struct frac_entry *frac = frac_table_pixel;
+ unsigned long request;
+ int delta = 100000;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ return clk_pixel_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_pixel_set_rate,
+ .set_rate_and_parent = clk_pixel_set_rate_and_parent,
+ .determine_rate = clk_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+<<<<<<<
+=======
+ /* Force enable bit */
+>>>>>>>
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+<<<<<<<
+ /* Delay for 1usec and retry polling the status bit */
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+=======
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+>>>>>>>
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+<<<<<<<
+=======
+ /* Clear force enable bit */
+>>>>>>>
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+<<<<<<<
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+=======
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, unsigned long rate)
+{
+>>>>>>>
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
+=======
+ ret = clk_rcg2_configure(rcg, f);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * In case clock is disabled, update the CFG, M, N and D registers
+ * and don't hit the update bit of CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+=======
+ int ret;
+
+ /*
+ * Return if the RCG is currently disabled. This configuration
+ * update will happen as part of the RCG enable sequence.
+ */
+ if (!__clk_is_enabled(hw->clk)) {
+ rcg->current_freq = rate;
+ return 0;
+ }
+
+ ret = clk_rcg2_shared_force_enable_clear(hw, rate);
+ if (ret)
+ return ret;
+
+ /* Update current frequency with the requested frequency. */
+ rcg->current_freq = rate;
+
+ return ret;
+>>>>>>>
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+<<<<<<<
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because required configuration has already
+ * been written in clk_rcg2_shared_set_rate()
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+=======
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!__clk_is_enabled(hw->clk)) {
+ if (!rcg->current_freq) {
+ if (!clk_rcg2_get_parent(hw))
+ rcg->current_freq =
+ rcg->safe_src_freq_tbl->freq;
+ else
+ rcg->current_freq =
+ clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+
+ return rcg->current_freq;
+ }
+
+ return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->current_freq == rcg->safe_src_freq_tbl->freq) {
+ clk_rcg2_set_force_enable(hw);
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+
+ return 0;
+ }
+
+ /*
+ * Switch from safe source to the stashed mux selection. The current
+ * parent has already been prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its source.
+ */
+
+ return clk_rcg2_shared_force_enable_clear(hw, rcg->current_freq);
+>>>>>>>
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ u32 cfg;
+
+ /*
+ * Store current configuration as switching to safe source would clear
+ * the SRC and DIV of CFG register
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of safe source.
+=======
+
+ /*
+ * Park the RCG at a safe configuration - sourced off from safe source.
+>>>>>>>
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+<<<<<<<
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+=======
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+>>>>>>>
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+<<<<<<<
+ .recalc_rate = clk_rcg2_recalc_rate,
+=======
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+>>>>>>>
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
diff --git a/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.1 b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.1
new file mode 100644
index 0000000..c41530d
--- /dev/null
+++ b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.1
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+<<<<<<<
+=======
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+>>>>>>>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+#include "common.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ goto err;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg)
+ return i;
+
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = clk_hw_get_name(hw);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ * parent_rate m
+ * rate = ----------- x ---
+ * hid_div n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+}
+
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg, mask;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + M_REG, mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ return clk_rcg2_configure(rcg, f);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ { 52, 295 }, /* 119 M */
+ { 11, 57 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 50 }, /* 148.50 M */
+ { 47, 206 }, /* 154 M */
+ { 31, 100 }, /* 205.25 M */
+ { 107, 269 }, /* 268.50 M */
+ { },
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ { 31, 211 }, /* 119 M */
+ { 32, 199 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 60 }, /* 148.50 M */
+ { 50, 263 }, /* 154 M */
+ { 31, 120 }, /* 205.25 M */
+ { 119, 359 }, /* 268.50 M */
+ { },
+};
+
+static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 src_rate = parent_rate;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_edp_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ /* Force the correct parent */
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ if (req->best_parent_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = req->rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ hid_div >>= CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+const struct clk_ops clk_edp_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_edp_pixel_set_rate,
+ .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
+ .determine_rate = clk_edp_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
+
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ unsigned long div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_byte_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte_set_rate,
+ .set_rate_and_parent = clk_byte_set_rate_and_parent,
+ .determine_rate = clk_byte_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte_ops);
+
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
+static const struct frac_entry frac_table_pixel[] = {
+ { 3, 8 },
+ { 2, 9 },
+ { 4, 9 },
+ { 1, 1 },
+ { }
+};
+
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long request, src_rate;
+ int delta = 100000;
+ const struct frac_entry *frac = frac_table_pixel;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ const struct frac_entry *frac = frac_table_pixel;
+ unsigned long request;
+ int delta = 100000;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ return clk_pixel_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_pixel_set_rate,
+ .set_rate_and_parent = clk_pixel_set_rate_and_parent,
+ .determine_rate = clk_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+<<<<<<<
+=======
+ /* Force enable bit */
+>>>>>>>
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+<<<<<<<
+ /* Delay for 1usec and retry polling the status bit */
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+=======
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+>>>>>>>
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+<<<<<<<
+=======
+ /* Clear force enable bit */
+>>>>>>>
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+<<<<<<<
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+=======
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, unsigned long rate)
+{
+>>>>>>>
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
+=======
+ ret = clk_rcg2_configure(rcg, f);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * In case clock is disabled, update the CFG, M, N and D registers
+ * and don't hit the update bit of CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+=======
+ int ret;
+
+ /*
+ * Return if the RCG is currently disabled. This configuration
+ * update will happen as part of the RCG enable sequence.
+ */
+ if (!__clk_is_enabled(hw->clk)) {
+ rcg->current_freq = rate;
+ return 0;
+ }
+
+ ret = clk_rcg2_shared_force_enable_clear(hw, rate);
+ if (ret)
+ return ret;
+
+ /* Update current frequency with the requested frequency. */
+ rcg->current_freq = rate;
+
+ return ret;
+>>>>>>>
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+<<<<<<<
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because required configuration has already
+ * been written in clk_rcg2_shared_set_rate()
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+=======
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!__clk_is_enabled(hw->clk)) {
+ if (!rcg->current_freq) {
+ if (!clk_rcg2_get_parent(hw))
+ rcg->current_freq =
+ rcg->safe_src_freq_tbl->freq;
+ else
+ rcg->current_freq =
+ clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+
+ return rcg->current_freq;
+ }
+
+ return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->current_freq == rcg->safe_src_freq_tbl->freq) {
+ clk_rcg2_set_force_enable(hw);
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+
+ return 0;
+ }
+
+ /*
+ * Switch from safe source to the stashed mux selection. The current
+ * parent has already been prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its source.
+ */
+
+ return clk_rcg2_shared_force_enable_clear(hw, rcg->current_freq);
+>>>>>>>
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ u32 cfg;
+
+ /*
+ * Store current configuration as switching to safe source would clear
+ * the SRC and DIV of CFG register
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of safe source.
+=======
+
+ /*
+ * Park the RCG at a safe configuration - sourced off from safe source.
+>>>>>>>
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+<<<<<<<
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+=======
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+>>>>>>>
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+<<<<<<<
+ .recalc_rate = clk_rcg2_recalc_rate,
+=======
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+>>>>>>>
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
diff --git a/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.2 b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.2
new file mode 100644
index 0000000..c41530d
--- /dev/null
+++ b/rr-cache/41a5a26760c0c7d81b8463f6e476126578e8739e/preimage.2
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+<<<<<<<
+=======
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+>>>>>>>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+#include "common.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ goto err;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg)
+ return i;
+
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = clk_hw_get_name(hw);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ * parent_rate m
+ * rate = ----------- x ---
+ * hid_div n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+}
+
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg, mask;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + M_REG, mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ return clk_rcg2_configure(rcg, f);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ { 52, 295 }, /* 119 M */
+ { 11, 57 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 50 }, /* 148.50 M */
+ { 47, 206 }, /* 154 M */
+ { 31, 100 }, /* 205.25 M */
+ { 107, 269 }, /* 268.50 M */
+ { },
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ { 31, 211 }, /* 119 M */
+ { 32, 199 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 60 }, /* 148.50 M */
+ { 50, 263 }, /* 154 M */
+ { 31, 120 }, /* 205.25 M */
+ { 119, 359 }, /* 268.50 M */
+ { },
+};
+
+static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 src_rate = parent_rate;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_edp_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ /* Force the correct parent */
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ if (req->best_parent_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = req->rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ hid_div >>= CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+const struct clk_ops clk_edp_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_edp_pixel_set_rate,
+ .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
+ .determine_rate = clk_edp_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
+
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ unsigned long div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in the frequency table */
+ return clk_byte_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte_set_rate,
+ .set_rate_and_parent = clk_byte_set_rate_and_parent,
+ .determine_rate = clk_byte_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte_ops);
+
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
+static const struct frac_entry frac_table_pixel[] = {
+ { 3, 8 },
+ { 2, 9 },
+ { 4, 9 },
+ { 1, 1 },
+ { }
+};
+
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long request, src_rate;
+ int delta = 100000;
+ const struct frac_entry *frac = frac_table_pixel;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ const struct frac_entry *frac = frac_table_pixel;
+ unsigned long request;
+ int delta = 100000;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ return clk_pixel_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_pixel_set_rate,
+ .set_rate_and_parent = clk_pixel_set_rate_and_parent,
+ .determine_rate = clk_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+<<<<<<<
+=======
+ /* Force enable bit */
+>>>>>>>
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+<<<<<<<
+ /* Delay for 1usec and retry polling the status bit */
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+=======
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+>>>>>>>
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+<<<<<<<
+=======
+ /* Clear force enable bit */
+>>>>>>>
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+<<<<<<<
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+=======
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, unsigned long rate)
+{
+>>>>>>>
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
+=======
+ ret = clk_rcg2_configure(rcg, f);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * In case clock is disabled, update the CFG, M, N and D registers
+ * and don't hit the update bit of CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+=======
+ int ret;
+
+ /*
+ * Return if the RCG is currently disabled. This configuration
+ * update will happen as part of the RCG enable sequence.
+ */
+ if (!__clk_is_enabled(hw->clk)) {
+ rcg->current_freq = rate;
+ return 0;
+ }
+
+ ret = clk_rcg2_shared_force_enable_clear(hw, rate);
+ if (ret)
+ return ret;
+
+ /* Update current frequency with the requested frequency. */
+ rcg->current_freq = rate;
+
+ return ret;
+>>>>>>>
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+<<<<<<<
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because the required configuration has already
+ * been written in clk_rcg2_shared_set_rate()
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+=======
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!__clk_is_enabled(hw->clk)) {
+ if (!rcg->current_freq) {
+ if (!clk_rcg2_get_parent(hw))
+ rcg->current_freq =
+ rcg->safe_src_freq_tbl->freq;
+ else
+ rcg->current_freq =
+ clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+
+ return rcg->current_freq;
+ }
+
+ return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->current_freq == rcg->safe_src_freq_tbl->freq) {
+ clk_rcg2_set_force_enable(hw);
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+
+ return 0;
+ }
+
+ /*
+ * Switch from the safe source to the stashed mux selection. The
+ * current parent has already been prepared and enabled at this point,
+ * and the safe source is always on while the application processor
+ * subsystem is online. Therefore, the RCG can safely switch its source.
+ */
+
+ return clk_rcg2_shared_force_enable_clear(hw, rcg->current_freq);
+>>>>>>>
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+<<<<<<<
+ u32 cfg;
+
+ /*
+ * Store current configuration as switching to safe source would clear
+ * the SRC and DIV of CFG register
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of the safe source.
+=======
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of the safe source.
+>>>>>>>
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point,
+ * and the safe source is always on while the application processor
+ * subsystem is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+<<<<<<<
+ clk_rcg2_configure(rcg, rcg->safe_src_freq_tbl);
+ clk_rcg2_clear_force_enable(hw);
+=======
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to the current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+>>>>>>>
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+<<<<<<<
+ .recalc_rate = clk_rcg2_recalc_rate,
+=======
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+>>>>>>>
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
diff --git a/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage
new file mode 100644
index 0000000..f0ffe96
--- /dev/null
+++ b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+<<<<<<<
+=======
+#include <linux/clk.h>
+>>>>>>>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "gdsc.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+<<<<<<<
+=======
+static struct freq_tbl cxo_safe_src_f = {
+ .pre_div = 1,
+};
+
+>>>>>>>
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "aud_ref_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+<<<<<<<
+=======
+static struct clk_fixed_factor bi_tcxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "bi_tcxo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+>>>>>>>
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7505c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x7701c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1005c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x82020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x5030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x44038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x44038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0xb00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_axis2_clk = {
+ .halt_reg = 0x8a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_axis2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "gcc_tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7500c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7508c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7508c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x750a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x750a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x770a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x770a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x1000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0xb004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d030,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d03c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+};
+
+static struct clk_hw *gcc_sdm845_hws[] = {
+ [BI_TCXO] = &bi_tcxo.hw,
+>>>>>>>
+};
+
+static struct clk_regmap *gcc_sdm845_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+<<<<<<<
+=======
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+>>>>>>>
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+};
+
+static struct gdsc *gcc_sdm845_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static const struct regmap_config gcc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x182090,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm845_gdscs),
+};
+
+static const struct of_device_id gcc_sdm845_match_table[] = {
+ { .compatible = "qcom,gcc-sdm845" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
+
+static int gcc_sdm845_probe(struct platform_device *pdev)
+{
+<<<<<<<
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int i, ret;
+=======
+ struct regmap *regmap;
+>>>>>>>
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+<<<<<<<
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* Enable CPUSS clocks */
+ regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
+ regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
+
+=======
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+ ret = devm_clk_hw_register(dev, gcc_sdm845_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Get the rate for safe source */
+ cxo_safe_src_f.freq = clk_get_rate(bi_tcxo.hw.clk);
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09FFC, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+>>>>>>>
+ return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+}
+
+static struct platform_driver gcc_sdm845_driver = {
+ .probe = gcc_sdm845_probe,
+ .driver = {
+ .name = "gcc-sdm845",
+ .of_match_table = gcc_sdm845_match_table,
+ },
+};
+
+static int __init gcc_sdm845_init(void)
+{
+ return platform_driver_register(&gcc_sdm845_driver);
+}
+subsys_initcall(gcc_sdm845_init);
+
+static void __exit gcc_sdm845_exit(void)
+{
+ platform_driver_unregister(&gcc_sdm845_driver);
+}
+module_exit(gcc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.1 b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.1
new file mode 100644
index 0000000..f0ffe96
--- /dev/null
+++ b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.1
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+<<<<<<<
+=======
+#include <linux/clk.h>
+>>>>>>>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "gdsc.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+<<<<<<<
+=======
+static struct freq_tbl cxo_safe_src_f = {
+ .pre_div = 1,
+};
+
+>>>>>>>
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "aud_ref_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+<<<<<<<
+=======
+static struct clk_fixed_factor bi_tcxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "bi_tcxo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+>>>>>>>
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7505c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x7701c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1005c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x82020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x5030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x44038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x44038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0xb00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_axis2_clk = {
+ .halt_reg = 0x8a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_axis2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "gcc_tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7500c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7508c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7508c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x750a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x750a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x770a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x770a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x1000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0xb004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d030,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d03c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+};
+
+static struct clk_hw *gcc_sdm845_hws[] = {
+ [BI_TCXO] = &bi_tcxo.hw,
+>>>>>>>
+};
+
+static struct clk_regmap *gcc_sdm845_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+<<<<<<<
+=======
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+>>>>>>>
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+};
+
+static struct gdsc *gcc_sdm845_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static const struct regmap_config gcc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x182090,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm845_gdscs),
+};
+
+static const struct of_device_id gcc_sdm845_match_table[] = {
+ { .compatible = "qcom,gcc-sdm845" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
+
+static int gcc_sdm845_probe(struct platform_device *pdev)
+{
+<<<<<<<
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int i, ret;
+=======
+ struct regmap *regmap;
+>>>>>>>
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+<<<<<<<
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* Enable CPUSS clocks */
+ regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
+ regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
+
+=======
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+ ret = devm_clk_hw_register(dev, gcc_sdm845_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Get the rate for safe source */
+ cxo_safe_src_f.freq = clk_get_rate(bi_tcxo.hw.clk);
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09FFC, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+>>>>>>>
+ return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+}
+
+static struct platform_driver gcc_sdm845_driver = {
+ .probe = gcc_sdm845_probe,
+ .driver = {
+ .name = "gcc-sdm845",
+ .of_match_table = gcc_sdm845_match_table,
+ },
+};
+
+static int __init gcc_sdm845_init(void)
+{
+ return platform_driver_register(&gcc_sdm845_driver);
+}
+subsys_initcall(gcc_sdm845_init);
+
+static void __exit gcc_sdm845_exit(void)
+{
+ platform_driver_unregister(&gcc_sdm845_driver);
+}
+module_exit(gcc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.2 b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.2
new file mode 100644
index 0000000..f0ffe96
--- /dev/null
+++ b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.2
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+<<<<<<<
+=======
+#include <linux/clk.h>
+>>>>>>>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "gdsc.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+<<<<<<<
+=======
+static struct freq_tbl cxo_safe_src_f = {
+ .pre_div = 1,
+};
+
+>>>>>>>
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "aud_ref_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+<<<<<<<
+=======
+static struct clk_fixed_factor bi_tcxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "bi_tcxo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+>>>>>>>
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7505c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x7701c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1005c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x82020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x5030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x44038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x44038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0xb00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_axis2_clk = {
+ .halt_reg = 0x8a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_axis2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "gcc_tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7500c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7508c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7508c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x750a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x750a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x770a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x770a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x1000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0xb004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d030,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d03c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+};
+
+static struct clk_hw *gcc_sdm845_hws[] = {
+ [BI_TCXO] = &bi_tcxo.hw,
+>>>>>>>
+};
+
+static struct clk_regmap *gcc_sdm845_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+<<<<<<<
+=======
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+>>>>>>>
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+};
+
+static struct gdsc *gcc_sdm845_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static const struct regmap_config gcc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x182090,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm845_gdscs),
+};
+
+static const struct of_device_id gcc_sdm845_match_table[] = {
+ { .compatible = "qcom,gcc-sdm845" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
+
+static int gcc_sdm845_probe(struct platform_device *pdev)
+{
+<<<<<<<
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int i, ret;
+=======
+ struct regmap *regmap;
+>>>>>>>
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+<<<<<<<
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* Enable CPUSS clocks */
+ regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
+ regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
+
+=======
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+ ret = devm_clk_hw_register(dev, gcc_sdm845_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Get the rate for safe source */
+ cxo_safe_src_f.freq = clk_get_rate(bi_tcxo.hw.clk);
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09FFC, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+>>>>>>>
+ return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+}
+
+static struct platform_driver gcc_sdm845_driver = {
+ .probe = gcc_sdm845_probe,
+ .driver = {
+ .name = "gcc-sdm845",
+ .of_match_table = gcc_sdm845_match_table,
+ },
+};
+
+static int __init gcc_sdm845_init(void)
+{
+ return platform_driver_register(&gcc_sdm845_driver);
+}
+subsys_initcall(gcc_sdm845_init);
+
+static void __exit gcc_sdm845_exit(void)
+{
+ platform_driver_unregister(&gcc_sdm845_driver);
+}
+module_exit(gcc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.3 b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.3
new file mode 100644
index 0000000..f0ffe96
--- /dev/null
+++ b/rr-cache/43c60db993e04ae4bbaad73360b168398f7c9aa4/preimage.3
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+<<<<<<<
+=======
+#include <linux/clk.h>
+>>>>>>>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "gdsc.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+<<<<<<<
+=======
+static struct freq_tbl cxo_safe_src_f = {
+ .pre_div = 1,
+};
+
+>>>>>>>
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "aud_ref_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+<<<<<<<
+=======
+static struct clk_fixed_factor bi_tcxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "bi_tcxo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+>>>>>>>
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+=======
+ .ops = &clk_rcg2_ops,
+>>>>>>>
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 5,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7505c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x7701c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1005c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+<<<<<<<
+=======
+ .safe_src_freq_tbl = &cxo_safe_src_f,
+>>>>>>>
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+<<<<<<<
+=======
+ .flags = CLK_SET_RATE_PARENT,
+>>>>>>>
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x82020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x5030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+<<<<<<<
+=======
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+>>>>>>>
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x44038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x44038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0xb00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_axis2_clk = {
+ .halt_reg = 0x8a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_axis2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+<<<<<<<
+=======
+ .halt_reg = 0x52004,
+>>>>>>>
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "gcc_tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7500c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7508c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7508c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x750a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x750a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x770a8,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x770a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77054,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77054,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x1000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_sec_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+<<<<<<<
+ .halt_check = BRANCH_HALT_SKIP,
+=======
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT_DELAY,
+>>>>>>>
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0xb004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+<<<<<<<
+=======
+ .flags = CLK_IS_CRITICAL,
+>>>>>>>
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = POLL_CFG_GDSCR,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d030,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d03c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+>>>>>>>
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+<<<<<<<
+=======
+ .flags = VOTABLE,
+};
+
+static struct clk_hw *gcc_sdm845_hws[] = {
+ [BI_TCXO] = &bi_tcxo.hw,
+>>>>>>>
+};
+
+static struct clk_regmap *gcc_sdm845_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+<<<<<<<
+=======
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+>>>>>>>
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+};
+
+static struct gdsc *gcc_sdm845_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static const struct regmap_config gcc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x182090,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm845_gdscs),
+};
+
+static const struct of_device_id gcc_sdm845_match_table[] = {
+ { .compatible = "qcom,gcc-sdm845" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
+
+static int gcc_sdm845_probe(struct platform_device *pdev)
+{
+<<<<<<<
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int i, ret;
+=======
+ struct regmap *regmap;
+>>>>>>>
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+<<<<<<<
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* Enable CPUSS clocks */
+ regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
+ regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
+
+=======
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+ ret = devm_clk_hw_register(dev, gcc_sdm845_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Get the rate for safe source */
+ cxo_safe_src_f.freq = clk_get_rate(bi_tcxo.hw.clk);
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09FFC, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+>>>>>>>
+ return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+}
+
+static struct platform_driver gcc_sdm845_driver = {
+ .probe = gcc_sdm845_probe,
+ .driver = {
+ .name = "gcc-sdm845",
+ .of_match_table = gcc_sdm845_match_table,
+ },
+};
+
+static int __init gcc_sdm845_init(void)
+{
+ return platform_driver_register(&gcc_sdm845_driver);
+}
+subsys_initcall(gcc_sdm845_init);
+
+static void __exit gcc_sdm845_exit(void)
+{
+ platform_driver_unregister(&gcc_sdm845_driver);
+}
+module_exit(gcc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/postimage b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/postimage
new file mode 100644
index 0000000..d393003
--- /dev/null
+++ b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/postimage
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include "q6dsp-common.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch)
+{
+ memset(ch_map, 0, PCM_MAX_NUM_CHANNEL);
+
+ switch (ch) {
+ case 1:
+ ch_map[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LS;
+ ch_map[3] = PCM_CHANNEL_RS;
+ break;
+ case 5:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ ch_map[3] = PCM_CHANNEL_LS;
+ ch_map[4] = PCM_CHANNEL_RS;
+ break;
+ case 6:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ break;
+ case 8:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ ch_map[6] = PCM_CHANNEL_LB;
+ ch_map[7] = PCM_CHANNEL_RB;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6dsp_map_channels);
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/preimage b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/preimage
new file mode 100644
index 0000000..163e6e4
--- /dev/null
+++ b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/preimage
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+>>>>>>>
+#include "q6dsp-common.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+<<<<<<<
+int q6dsp_map_channels(u8 ch_map[PCM_FORMAT_MAX_NUM_CHANNEL], int ch)
+{
+ memset(ch_map, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ switch (ch) {
+ case 1:
+ ch_map[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LS;
+ ch_map[3] = PCM_CHANNEL_RS;
+ break;
+ case 5:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ ch_map[3] = PCM_CHANNEL_LS;
+ ch_map[4] = PCM_CHANNEL_RS;
+ break;
+ case 6:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ break;
+ case 8:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ ch_map[6] = PCM_CHANNEL_LB;
+ ch_map[7] = PCM_CHANNEL_RB;
+ break;
+ default:
+ return -EINVAL;
+=======
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch)
+{
+ memset(ch_map, 0, PCM_MAX_NUM_CHANNEL);
+
+ switch (ch) {
+ case 1:
+ ch_map[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LS;
+ ch_map[3] = PCM_CHANNEL_RS;
+ break;
+ case 5:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ ch_map[3] = PCM_CHANNEL_LS;
+ ch_map[4] = PCM_CHANNEL_RS;
+ break;
+ case 6:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ break;
+ case 8:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ ch_map[6] = PCM_CHANNEL_LB;
+ ch_map[7] = PCM_CHANNEL_RB;
+ break;
+ default:
+ return -EINVAL;
+>>>>>>>
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6dsp_map_channels);
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/thisimage b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/thisimage
new file mode 100644
index 0000000..163e6e4
--- /dev/null
+++ b/rr-cache/44553d9ece3afb8faca21ac853fce183d527cb76/thisimage
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+>>>>>>>
+#include "q6dsp-common.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+<<<<<<<
+int q6dsp_map_channels(u8 ch_map[PCM_FORMAT_MAX_NUM_CHANNEL], int ch)
+{
+ memset(ch_map, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ switch (ch) {
+ case 1:
+ ch_map[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LS;
+ ch_map[3] = PCM_CHANNEL_RS;
+ break;
+ case 5:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ ch_map[3] = PCM_CHANNEL_LS;
+ ch_map[4] = PCM_CHANNEL_RS;
+ break;
+ case 6:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ break;
+ case 8:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ ch_map[6] = PCM_CHANNEL_LB;
+ ch_map[7] = PCM_CHANNEL_RB;
+ break;
+ default:
+ return -EINVAL;
+=======
+int q6dsp_map_channels(u8 ch_map[PCM_MAX_NUM_CHANNEL], int ch)
+{
+ memset(ch_map, 0, PCM_MAX_NUM_CHANNEL);
+
+ switch (ch) {
+ case 1:
+ ch_map[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LS;
+ ch_map[3] = PCM_CHANNEL_RS;
+ break;
+ case 5:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_FC;
+ ch_map[3] = PCM_CHANNEL_LS;
+ ch_map[4] = PCM_CHANNEL_RS;
+ break;
+ case 6:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ break;
+ case 8:
+ ch_map[0] = PCM_CHANNEL_FL;
+ ch_map[1] = PCM_CHANNEL_FR;
+ ch_map[2] = PCM_CHANNEL_LFE;
+ ch_map[3] = PCM_CHANNEL_FC;
+ ch_map[4] = PCM_CHANNEL_LS;
+ ch_map[5] = PCM_CHANNEL_RS;
+ ch_map[6] = PCM_CHANNEL_LB;
+ ch_map[7] = PCM_CHANNEL_RB;
+ break;
+ default:
+ return -EINVAL;
+>>>>>>>
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6dsp_map_channels);
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/45c71f5f3b20c146e79944c1febc8c47e83e82c1/preimage b/rr-cache/45c71f5f3b20c146e79944c1febc8c47e83e82c1/preimage
new file mode 100644
index 0000000..60851f2
--- /dev/null
+++ b/rr-cache/45c71f5f3b20c146e79944c1febc8c47e83e82c1/preimage
@@ -0,0 +1,634 @@
+/*
+ * Qualcomm SCM driver
+ *
+ * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
+
+#include "qcom_scm.h"
+
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
+#define SCM_HAS_CORE_CLK BIT(0)
+#define SCM_HAS_IFACE_CLK BIT(1)
+#define SCM_HAS_BUS_CLK BIT(2)
+
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct reset_controller_dev reset;
+
+ u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ return 0;
+
+disable_iface:
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ clk_disable_unprepare(__scm->core_clk);
+ clk_disable_unprepare(__scm->iface_clk);
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return __qcom_scm_set_cold_boot_addr(entry, cpus);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+}
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending interrupt,
+ * control will return from this function, otherwise the cpu jumps to the
+ * warm boot entry point set for this cpu upon reset.
+ */
+void qcom_scm_cpu_power_down(u32 flags)
+{
+ __qcom_scm_cpu_power_down(flags);
+}
+EXPORT_SYMBOL(qcom_scm_cpu_power_down);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_CMD_HDCP);
+
+ qcom_scm_clk_disable();
+
+ return ret > 0 ? true : false;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
+ qcom_scm_clk_disable();
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(__scm->dev, peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+
+ /*
+ * During the SCM call, memory protection will be enabled for the
+ * metadata blob, so make sure it's physically contiguous, 4K aligned
+ * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+ GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto free_metadata;
+
+ ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+
+ qcom_scm_clk_disable();
+
+free_metadata:
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+ .assert = qcom_scm_pas_reset_assert,
+ .deassert = qcom_scm_pas_reset_deassert,
+};
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+ return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ return __qcom_scm_io_readl(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ return __qcom_scm_io_writel(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
+ enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+int qcom_scm_set_remote_state(u32 state, u32 id)
+{
+ return __qcom_scm_set_remote_state(__scm->dev, state, id);
+}
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
+
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid for current set of owners, each set bit in
+ * flag indicates a unique owner
+ * @newvm: array having new owners and corresponding permission
+ * flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *srcvm,
+ struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret;
+ int len;
+ int i;
+
+ src_sz = hweight_long(*srcvm) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ len = hweight_long(*srcvm);
+ for (i = 0; i < len; i++) {
+ src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+ *srcvm ^= 1 << (ffs(*srcvm) - 1);
+ }
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill details of next vmid */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++) {
+ destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+ destvm[i].perm = cpu_to_le32(newvm[i].perm);
+ destvm[i].ctx = 0;
+ destvm[i].ctx_size = 0;
+ next_vm |= BIT(newvm[i].vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ unsigned long clks;
+ int ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
+ clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (clks & SCM_HAS_CORE_CLK) {
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire core clk\n");
+ return PTR_ERR(scm->core_clk);
+ }
+ }
+
+ if (clks & SCM_HAS_IFACE_CLK) {
+ scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk)) {
+ if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire iface clk\n");
+ return PTR_ERR(scm->iface_clk);
+ }
+ }
+
+ if (clks & SCM_HAS_BUS_CLK) {
+ scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk)) {
+ if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire bus clk\n");
+ return PTR_ERR(scm->bus_clk);
+ }
+ }
+
+ scm->reset.ops = &qcom_scm_pas_reset_ops;
+ scm->reset.nr_resets = 1;
+ scm->reset.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
+
+ /* vote for max clk rate for highest performance */
+ ret = clk_set_rate(scm->core_clk, INT_MAX);
+ if (ret)
+ return ret;
+
+ __scm = scm;
+ __scm->dev = &pdev->dev;
+
+ __qcom_scm_init();
+
+ /*
+ * If requested, enable "download mode"; from this point on warmboot
+ * will cause the boot stages to enter download mode, unless
+ * disabled below by a clean shutdown/reboot.
+ */
+ if (download_mode)
+ qcom_scm_set_download_mode(true);
+
+ return 0;
+}
+
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+ /* Clean shutdown, disable download mode to allow normal restart */
+ if (download_mode)
+ qcom_scm_set_download_mode(false);
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm-apq8064",
+ /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
+ },
+ { .compatible = "qcom,scm-msm8660",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8960",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8996",
+ .data = NULL, /* no clocks */
+ },
+<<<<<<<
+ { .compatible = "qcom,scm-ipq4019",
+ .data = NULL, /* no clocks */
+ },
+=======
+ { .compatible = "qcom,scm-msm8998" },
+>>>>>>>
+ { .compatible = "qcom,scm",
+ .data = (void *)(SCM_HAS_CORE_CLK
+ | SCM_HAS_IFACE_CLK
+ | SCM_HAS_BUS_CLK),
+ },
+ {}
+};
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ },
+ .probe = qcom_scm_probe,
+ .shutdown = qcom_scm_shutdown,
+};
+
+static int __init qcom_scm_init(void)
+{
+ return platform_driver_register(&qcom_scm_driver);
+}
+subsys_initcall(qcom_scm_init);
diff --git a/rr-cache/4a8797e491ea33c914db6a50118dd923972b062c/preimage b/rr-cache/4a8797e491ea33c914db6a50118dd923972b062c/preimage
new file mode 100644
index 0000000..7d59737
--- /dev/null
+++ b/rr-cache/4a8797e491ea33c914db6a50118dd923972b062c/preimage
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+<<<<<<<
+=======
+CFLAGS_rpmh-rsc.o := -I$(src)
+>>>>>>>
+obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+<<<<<<<
+=======
+obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
+>>>>>>>
+obj-$(CONFIG_QCOM_APR) += apr.o
diff --git a/rr-cache/4dc2a85bc024568d1d44bbde78440fbdd72486b9/preimage b/rr-cache/4dc2a85bc024568d1d44bbde78440fbdd72486b9/preimage
new file mode 100644
index 0000000..27160c7
--- /dev/null
+++ b/rr-cache/4dc2a85bc024568d1d44bbde78440fbdd72486b9/preimage
@@ -0,0 +1,1891 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/qcom,gcc-msm8960.h>
+#include <dt-bindings/reset/qcom,gcc-msm8960.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/qcom,apr.h>
+/ {
+ model = "Qualcomm APQ8064";
+ compatible = "qcom,apq8064";
+ interrupt-parent = <&intc>;
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ smem_region: smem@80000000 {
+ reg = <0x80000000 0x200000>;
+ no-map;
+ };
+
+ wcnss_mem: wcnss@8f000000 {
+ reg = <0x8f000000 0x700000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ compatible = "qcom,krait";
+ enable-method = "qcom,kpss-acc-v1";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ qcom,acc = <&acc0>;
+ qcom,saw = <&saw0>;
+ cpu-idle-states = <&CPU_SPC>;
+ };
+
+ CPU1: cpu@1 {
+ compatible = "qcom,krait";
+ enable-method = "qcom,kpss-acc-v1";
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ qcom,acc = <&acc1>;
+ qcom,saw = <&saw1>;
+ cpu-idle-states = <&CPU_SPC>;
+ };
+
+ CPU2: cpu@2 {
+ compatible = "qcom,krait";
+ enable-method = "qcom,kpss-acc-v1";
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2>;
+ qcom,acc = <&acc2>;
+ qcom,saw = <&saw2>;
+ cpu-idle-states = <&CPU_SPC>;
+ };
+
+ CPU3: cpu@3 {
+ compatible = "qcom,krait";
+ enable-method = "qcom,kpss-acc-v1";
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2>;
+ qcom,acc = <&acc3>;
+ qcom,saw = <&saw3>;
+ cpu-idle-states = <&CPU_SPC>;
+ };
+
+ L2: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "qcom,idle-state-spc",
+ "arm,idle-state";
+ entry-latency-us = <400>;
+ exit-latency-us = <900>;
+ min-residency-us = <3000>;
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 7>;
+ coefficients = <1199 0>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 8>;
+ coefficients = <1132 0>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 9>;
+ coefficients = <1199 0>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 10>;
+ coefficients = <1132 0>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ cpu-pmu {
+ compatible = "qcom,krait-pmu";
+ interrupts = <1 10 0x304>;
+ };
+
+ clocks {
+ cxo_board: cxo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+
+ pxo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ sfpb_mutex: hwmutex {
+ compatible = "qcom,sfpb-mutex";
+ syscon = <&sfpb_wrapper_mutex 0x604 0x4>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+
+ hwlocks = <&sfpb_mutex 3>;
+ };
+
+ smd {
+ compatible = "qcom,smd";
+
+ modem@0 {
+ interrupts = <0 37 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 3>;
+ qcom,smd-edge = <0>;
+
+ status = "disabled";
+ };
+
+ q6@1 {
+ interrupts = <0 90 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 15>;
+ qcom,smd-edge = <1>;
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+ #sound-dai-cells = <0>;
+ };
+
+ };
+
+ };
+
+ dsps@3 {
+ interrupts = <0 138 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&sps_sic_non_secure 0x4080 0>;
+ qcom,smd-edge = <3>;
+
+ status = "disabled";
+ };
+
+ riva@6 {
+ interrupts = <0 198 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 25>;
+ qcom,smd-edge = <6>;
+
+ status = "disabled";
+ };
+ };
+
+ smsm {
+ compatible = "qcom,smsm";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ipc-1 = <&l2cc 8 4>;
+ qcom,ipc-2 = <&l2cc 8 14>;
+ qcom,ipc-3 = <&l2cc 8 23>;
+ qcom,ipc-4 = <&sps_sic_non_secure 0x4094 0>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smsm: modem@1 {
+ reg = <1>;
+ interrupts = <0 38 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ q6_smsm: q6@2 {
+ reg = <2>;
+ interrupts = <0 89 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wcnss_smsm: wcnss@3 {
+ reg = <3>;
+ interrupts = <0 204 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ dsps_smsm: dsps@4 {
+ reg = <4>;
+ interrupts = <0 137 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-apq8064";
+
+ clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
+ clock-names = "core";
+ };
+ };
+
+ /*
+ * These channels from the ADC are simply hardware monitors.
+ * That is why the ADC is referred to as "HKADC" - HouseKeeping
+ * ADC.
+ */
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&xoadc 0x00 0x01>, /* Battery */
+ <&xoadc 0x00 0x02>, /* DC in (charger) */
+ <&xoadc 0x00 0x04>, /* VPH the main system voltage */
+ <&xoadc 0x00 0x0b>, /* Die temperature */
+ <&xoadc 0x00 0x0c>, /* Reference voltage 1.25V */
+ <&xoadc 0x00 0x0d>, /* Reference voltage 0.625V */
+ <&xoadc 0x00 0x0e>; /* Charger temperature */
+ };
+
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+
+ tlmm_pinmux: pinctrl@800000 {
+ compatible = "qcom,apq8064-pinctrl";
+ reg = <0x800000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <0 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ps_hold>;
+ };
+
+ sfpb_wrapper_mutex: syscon@1200000 {
+ compatible = "syscon";
+ reg = <0x01200000 0x8000>;
+ };
+
+ intc: interrupt-controller@2000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x02000000 0x1000>,
+ <0x02002000 0x1000>;
+ };
+
+ timer@200a000 {
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-apq8064", "qcom,msm-timer";
+ interrupts = <1 1 0x301>,
+ <1 2 0x301>,
+ <1 3 0x301>;
+ reg = <0x0200a000 0x100>;
+ clock-frequency = <27000000>,
+ <32768>;
+ cpu-offset = <0x80000>;
+ };
+
+ watchdog@208a038 {
+ compatible = "qcom,kpss-wdt-apq8064";
+ reg = <0x0208a038 0x40>;
+ clocks = <&sleep_clk>;
+ timeout-sec = <10>;
+ };
+
+ acc0: clock-controller@2088000 {
+ compatible = "qcom,kpss-acc-v1";
+ reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
+ };
+
+ acc1: clock-controller@2098000 {
+ compatible = "qcom,kpss-acc-v1";
+ reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
+ };
+
+ acc2: clock-controller@20a8000 {
+ compatible = "qcom,kpss-acc-v1";
+ reg = <0x020a8000 0x1000>, <0x02008000 0x1000>;
+ };
+
+ acc3: clock-controller@20b8000 {
+ compatible = "qcom,kpss-acc-v1";
+ reg = <0x020b8000 0x1000>, <0x02008000 0x1000>;
+ };
+
+ saw0: power-controller@2089000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
+ reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
+ regulator;
+ };
+
+ saw1: power-controller@2099000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
+ reg = <0x02099000 0x1000>, <0x02009000 0x1000>;
+ regulator;
+ };
+
+ saw2: power-controller@20a9000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
+ reg = <0x020a9000 0x1000>, <0x02009000 0x1000>;
+ regulator;
+ };
+
+ saw3: power-controller@20b9000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
+ reg = <0x020b9000 0x1000>, <0x02009000 0x1000>;
+ regulator;
+ };
+
+ sps_sic_non_secure: sps-sic-non-secure@12100000 {
+ compatible = "syscon";
+ reg = <0x12100000 0x10000>;
+ };
+
+ gsbi1: gsbi@12440000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <1>;
+ reg = <0x12440000 0x100>;
+ clocks = <&gcc GSBI1_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ syscon-tcsr = <&tcsr>;
+
+ gsbi1_serial: serial@12450000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x12450000 0x100>,
+ <0x12400000 0x03>;
+ interrupts = <0 193 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI1_UART_CLK>, <&gcc GSBI1_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ gsbi1_i2c: i2c@12460000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-1 = <&i2c1_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x12460000 0x1000>;
+ interrupts = <0 194 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI1_QUP_CLK>, <&gcc GSBI1_H_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ };
+
+ gsbi2: gsbi@12480000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <2>;
+ reg = <0x12480000 0x100>;
+ clocks = <&gcc GSBI2_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ syscon-tcsr = <&tcsr>;
+
+ gsbi2_i2c: i2c@124a0000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ reg = <0x124a0000 0x1000>;
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-1 = <&i2c2_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ interrupts = <0 196 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ gsbi3: gsbi@16200000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <3>;
+ reg = <0x16200000 0x100>;
+ clocks = <&gcc GSBI3_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ gsbi3_i2c: i2c@16280000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-1 = <&i2c3_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x16280000 0x1000>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI3_QUP_CLK>,
+ <&gcc GSBI3_H_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ gsbi4: gsbi@16300000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <4>;
+ reg = <0x16300000 0x03>;
+ clocks = <&gcc GSBI4_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gsbi4_i2c: i2c@16380000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-1 = <&i2c4_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x16380000 0x1000>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI4_QUP_CLK>,
+ <&gcc GSBI4_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+ };
+
+ gsbi5: gsbi@1a200000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <5>;
+ reg = <0x1a200000 0x03>;
+ clocks = <&gcc GSBI5_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gsbi5_serial: serial@1a240000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x1a240000 0x100>,
+ <0x1a200000 0x03>;
+ interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ gsbi5_spi: spi@1a280000 {
+ compatible = "qcom,spi-qup-v1.1.1";
+ reg = <0x1a280000 0x1000>;
+ interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi5_default>;
+ pinctrl-1 = <&spi5_sleep>;
+ pinctrl-names = "default", "sleep";
+ clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ gsbi6: gsbi@16500000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <6>;
+ reg = <0x16500000 0x03>;
+ clocks = <&gcc GSBI6_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ syscon-tcsr = <&tcsr>;
+
+ gsbi6_serial: serial@16540000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x16540000 0x100>,
+ <0x16500000 0x03>;
+ interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>;
+ clock-names = "core", "iface";
+
+ qcom,rx-crci = <11>;
+ qcom,tx-crci = <6>;
+
+ dmas = <&adm 6>, <&adm 7>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ gsbi6_i2c: i2c@16580000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c6_pins>;
+ pinctrl-1 = <&i2c6_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x16580000 0x1000>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI6_QUP_CLK>,
+ <&gcc GSBI6_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+ };
+
+ gsbi7: gsbi@16600000 {
+ status = "disabled";
+ compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <7>;
+ reg = <0x16600000 0x100>;
+ clocks = <&gcc GSBI7_H_CLK>;
+ clock-names = "iface";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ syscon-tcsr = <&tcsr>;
+
+ gsbi7_serial: serial@16640000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x16640000 0x1000>,
+ <0x16600000 0x1000>;
+ interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI7_UART_CLK>, <&gcc GSBI7_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ gsbi7_i2c: i2c@16680000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c7_pins>;
+ pinctrl-1 = <&i2c7_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x16680000 0x1000>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GSBI7_QUP_CLK>,
+ <&gcc GSBI7_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+ };
+
+ rng@1a500000 {
+ compatible = "qcom,prng";
+ reg = <0x1a500000 0x200>;
+ clocks = <&gcc PRNG_CLK>;
+ clock-names = "core";
+ };
+
+ ssbi@c00000 {
+ compatible = "qcom,ssbi";
+ reg = <0x00c00000 0x1000>;
+ qcom,controller-type = "pmic-arbiter";
+
+ pm8821: pmic@1 {
+ compatible = "qcom,pm8821";
+ interrupt-parent = <&tlmm_pinmux>;
+ interrupts = <76 IRQ_TYPE_LEVEL_LOW>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8821_mpps: mpps@50 {
+ compatible = "qcom,pm8821-mpp", "qcom,ssbi-mpp";
+ reg = <0x50>;
+ interrupts = <24 IRQ_TYPE_NONE>,
+ <25 IRQ_TYPE_NONE>,
+ <26 IRQ_TYPE_NONE>,
+ <27 IRQ_TYPE_NONE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+ };
+
+ qcom,ssbi@500000 {
+ compatible = "qcom,ssbi";
+ reg = <0x00500000 0x1000>;
+ qcom,controller-type = "pmic-arbiter";
+
+ pmicintc: pmic@0 {
+ compatible = "qcom,pm8921";
+ interrupt-parent = <&tlmm_pinmux>;
+ interrupts = <74 8>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8921_gpio: gpio@150 {
+
+ compatible = "qcom,pm8921-gpio",
+ "qcom,ssbi-gpio";
+ reg = <0x150>;
+ interrupts = <192 IRQ_TYPE_NONE>,
+ <193 IRQ_TYPE_NONE>,
+ <194 IRQ_TYPE_NONE>,
+ <195 IRQ_TYPE_NONE>,
+ <196 IRQ_TYPE_NONE>,
+ <197 IRQ_TYPE_NONE>,
+ <198 IRQ_TYPE_NONE>,
+ <199 IRQ_TYPE_NONE>,
+ <200 IRQ_TYPE_NONE>,
+ <201 IRQ_TYPE_NONE>,
+ <202 IRQ_TYPE_NONE>,
+ <203 IRQ_TYPE_NONE>,
+ <204 IRQ_TYPE_NONE>,
+ <205 IRQ_TYPE_NONE>,
+ <206 IRQ_TYPE_NONE>,
+ <207 IRQ_TYPE_NONE>,
+ <208 IRQ_TYPE_NONE>,
+ <209 IRQ_TYPE_NONE>,
+ <210 IRQ_TYPE_NONE>,
+ <211 IRQ_TYPE_NONE>,
+ <212 IRQ_TYPE_NONE>,
+ <213 IRQ_TYPE_NONE>,
+ <214 IRQ_TYPE_NONE>,
+ <215 IRQ_TYPE_NONE>,
+ <216 IRQ_TYPE_NONE>,
+ <217 IRQ_TYPE_NONE>,
+ <218 IRQ_TYPE_NONE>,
+ <219 IRQ_TYPE_NONE>,
+ <220 IRQ_TYPE_NONE>,
+ <221 IRQ_TYPE_NONE>,
+ <222 IRQ_TYPE_NONE>,
+ <223 IRQ_TYPE_NONE>,
+ <224 IRQ_TYPE_NONE>,
+ <225 IRQ_TYPE_NONE>,
+ <226 IRQ_TYPE_NONE>,
+ <227 IRQ_TYPE_NONE>,
+ <228 IRQ_TYPE_NONE>,
+ <229 IRQ_TYPE_NONE>,
+ <230 IRQ_TYPE_NONE>,
+ <231 IRQ_TYPE_NONE>,
+ <232 IRQ_TYPE_NONE>,
+ <233 IRQ_TYPE_NONE>,
+ <234 IRQ_TYPE_NONE>,
+ <235 IRQ_TYPE_NONE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ };
+
+ pm8921_mpps: mpps@50 {
+ compatible = "qcom,pm8921-mpp",
+ "qcom,ssbi-mpp";
+ reg = <0x50>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts =
+ <128 IRQ_TYPE_NONE>,
+ <129 IRQ_TYPE_NONE>,
+ <130 IRQ_TYPE_NONE>,
+ <131 IRQ_TYPE_NONE>,
+ <132 IRQ_TYPE_NONE>,
+ <133 IRQ_TYPE_NONE>,
+ <134 IRQ_TYPE_NONE>,
+ <135 IRQ_TYPE_NONE>,
+ <136 IRQ_TYPE_NONE>,
+ <137 IRQ_TYPE_NONE>,
+ <138 IRQ_TYPE_NONE>,
+ <139 IRQ_TYPE_NONE>;
+ };
+
+ rtc@11d {
+ compatible = "qcom,pm8921-rtc";
+ interrupt-parent = <&pmicintc>;
+ interrupts = <39 1>;
+ reg = <0x11d>;
+ allow-set-time;
+ };
+
+ pwrkey@1c {
+ compatible = "qcom,pm8921-pwrkey";
+ reg = <0x1c>;
+ interrupt-parent = <&pmicintc>;
+ interrupts = <50 1>, <51 1>;
+ debounce = <15625>;
+ pull-up;
+ };
+
+ xoadc: xoadc@197 {
+ compatible = "qcom,pm8921-adc";
+ reg = <197>;
+ interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ #io-channel-cells = <2>;
+
+ vcoin: adc-channel@00 {
+ reg = <0x00 0x00>;
+ };
+ vbat: adc-channel@01 {
+ reg = <0x00 0x01>;
+ };
+ dcin: adc-channel@02 {
+ reg = <0x00 0x02>;
+ };
+ vph_pwr: adc-channel@04 {
+ reg = <0x00 0x04>;
+ };
+ batt_therm: adc-channel@08 {
+ reg = <0x00 0x08>;
+ };
+ batt_id: adc-channel@09 {
+ reg = <0x00 0x09>;
+ };
+ usb_vbus: adc-channel@0a {
+ reg = <0x00 0x0a>;
+ };
+ die_temp: adc-channel@0b {
+ reg = <0x00 0x0b>;
+ };
+ ref_625mv: adc-channel@0c {
+ reg = <0x00 0x0c>;
+ };
+ ref_1250mv: adc-channel@0d {
+ reg = <0x00 0x0d>;
+ };
+ chg_temp: adc-channel@0e {
+ reg = <0x00 0x0e>;
+ };
+ ref_muxoff: adc-channel@0f {
+ reg = <0x00 0x0f>;
+ };
+ };
+ };
+ };
+
+ qfprom: qfprom@700000 {
+ compatible = "qcom,qfprom";
+ reg = <0x00700000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ tsens_calib: calib {
+ reg = <0x404 0x10>;
+ };
+ tsens_backup: backup_calib {
+ reg = <0x414 0x10>;
+ };
+ };
+
+ gcc: clock-controller@900000 {
+ compatible = "qcom,gcc-apq8064";
+ reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ lcc: clock-controller@28000000 {
+ compatible = "qcom,lcc-apq8064";
+ reg = <0x28000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ mmcc: clock-controller@4000000 {
+ compatible = "qcom,mmcc-apq8064";
+ reg = <0x4000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ l2cc: clock-controller@2011000 {
+ compatible = "syscon";
+ reg = <0x2011000 0x1000>;
+ };
+
+ rpm@108000 {
+ compatible = "qcom,rpm-apq8064";
+ reg = <0x108000 0x1000>;
+ qcom,ipc = <&l2cc 0x8 2>;
+
+ interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ack", "err", "wakeup";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-apq8064", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
+ regulators {
+ compatible = "qcom,rpm-pm8921-regulators";
+
+ pm8921_s1: s1 {};
+ pm8921_s2: s2 {};
+ pm8921_s3: s3 {};
+ pm8921_s4: s4 {};
+ pm8921_s7: s7 {};
+ pm8921_s8: s8 {};
+
+ pm8921_l1: l1 {};
+ pm8921_l2: l2 {};
+ pm8921_l3: l3 {};
+ pm8921_l4: l4 {};
+ pm8921_l5: l5 {};
+ pm8921_l6: l6 {};
+ pm8921_l7: l7 {};
+ pm8921_l8: l8 {};
+ pm8921_l9: l9 {};
+ pm8921_l10: l10 {};
+ pm8921_l11: l11 {};
+ pm8921_l12: l12 {};
+ pm8921_l14: l14 {};
+ pm8921_l15: l15 {};
+ pm8921_l16: l16 {};
+ pm8921_l17: l17 {};
+ pm8921_l18: l18 {};
+ pm8921_l21: l21 {};
+ pm8921_l22: l22 {};
+ pm8921_l23: l23 {};
+ pm8921_l24: l24 {};
+ pm8921_l25: l25 {};
+ pm8921_l26: l26 {};
+ pm8921_l27: l27 {};
+ pm8921_l28: l28 {};
+ pm8921_l29: l29 {};
+
+ pm8921_lvs1: lvs1 {};
+ pm8921_lvs2: lvs2 {};
+ pm8921_lvs3: lvs3 {};
+ pm8921_lvs4: lvs4 {};
+ pm8921_lvs5: lvs5 {};
+ pm8921_lvs6: lvs6 {};
+ pm8921_lvs7: lvs7 {};
+
+ pm8921_usb_switch: usb-switch {};
+
+ pm8921_hdmi_switch: hdmi-switch {
+ bias-pull-down;
+ };
+
+ pm8921_ncp: ncp {};
+ };
+ };
+
+ usb1: usb@12500000 {
+ compatible = "qcom,ci-hdrc";
+ reg = <0x12500000 0x200>,
+ <0x12500200 0x200>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc USB_HS1_XCVR_CLK>, <&gcc USB_HS1_H_CLK>;
+ clock-names = "core", "iface";
+ assigned-clocks = <&gcc USB_HS1_XCVR_CLK>;
+ assigned-clock-rates = <60000000>;
+ resets = <&gcc USB_HS1_RESET>;
+ reset-names = "core";
+ phy_type = "ulpi";
+ ahb-burst-config = <0>;
+ phys = <&usb_hs1_phy>;
+ phy-names = "usb-phy";
+ status = "disabled";
+ #reset-cells = <1>;
+
+ ulpi {
+ usb_hs1_phy: phy {
+ compatible = "qcom,usb-hs-phy-apq8064",
+ "qcom,usb-hs-phy";
+ clocks = <&sleep_clk>, <&cxo_board>;
+ clock-names = "sleep", "ref";
+ resets = <&usb1 0>;
+ reset-names = "por";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ usb3: usb@12520000 {
+ compatible = "qcom,ci-hdrc";
+ reg = <0x12520000 0x200>,
+ <0x12520200 0x200>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc USB_HS3_XCVR_CLK>, <&gcc USB_HS3_H_CLK>;
+ clock-names = "core", "iface";
+ assigned-clocks = <&gcc USB_HS3_XCVR_CLK>;
+ assigned-clock-rates = <60000000>;
+ resets = <&gcc USB_HS3_RESET>;
+ reset-names = "core";
+ phy_type = "ulpi";
+ ahb-burst-config = <0>;
+ phys = <&usb_hs3_phy>;
+ phy-names = "usb-phy";
+ status = "disabled";
+ #reset-cells = <1>;
+
+ ulpi {
+ usb_hs3_phy: phy {
+ compatible = "qcom,usb-hs-phy-apq8064",
+ "qcom,usb-hs-phy";
+ #phy-cells = <0>;
+ clocks = <&sleep_clk>, <&cxo_board>;
+ clock-names = "sleep", "ref";
+ resets = <&usb3 0>;
+ reset-names = "por";
+ };
+ };
+ };
+
+ usb4: usb@12530000 {
+ compatible = "qcom,ci-hdrc";
+ reg = <0x12530000 0x200>,
+ <0x12530200 0x200>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc USB_HS4_XCVR_CLK>, <&gcc USB_HS4_H_CLK>;
+ clock-names = "core", "iface";
+ assigned-clocks = <&gcc USB_HS4_XCVR_CLK>;
+ assigned-clock-rates = <60000000>;
+ resets = <&gcc USB_HS4_RESET>;
+ reset-names = "core";
+ phy_type = "ulpi";
+ ahb-burst-config = <0>;
+ phys = <&usb_hs4_phy>;
+ phy-names = "usb-phy";
+ status = "disabled";
+ #reset-cells = <1>;
+
+ ulpi {
+ usb_hs4_phy: phy {
+ compatible = "qcom,usb-hs-phy-apq8064",
+ "qcom,usb-hs-phy";
+ #phy-cells = <0>;
+ clocks = <&sleep_clk>, <&cxo_board>;
+ clock-names = "sleep", "ref";
+ resets = <&usb4 0>;
+ reset-names = "por";
+ };
+ };
+ };
+
+ sata_phy0: phy@1b400000 {
+ compatible = "qcom,apq8064-sata-phy";
+ status = "disabled";
+ reg = <0x1b400000 0x200>;
+ reg-names = "phy_mem";
+ clocks = <&gcc SATA_PHY_CFG_CLK>;
+ clock-names = "cfg";
+ #phy-cells = <0>;
+ };
+
+ sata0: sata@29000000 {
+ compatible = "qcom,apq8064-ahci", "generic-ahci";
+ status = "disabled";
+ reg = <0x29000000 0x180>;
+ interrupts = <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc SFAB_SATA_S_H_CLK>,
+ <&gcc SATA_H_CLK>,
+ <&gcc SATA_A_CLK>,
+ <&gcc SATA_RXOOB_CLK>,
+ <&gcc SATA_PMALIVE_CLK>;
+ clock-names = "slave_iface",
+ "iface",
+ "bus",
+ "rxoob",
+ "core_pmalive";
+
+ assigned-clocks = <&gcc SATA_RXOOB_CLK>,
+ <&gcc SATA_PMALIVE_CLK>;
+ assigned-clock-rates = <100000000>, <100000000>;
+
+ phys = <&sata_phy0>;
+ phy-names = "sata-phy";
+ ports-implemented = <0x1>;
+ };
+
+ /* Temporary fixed regulator */
+ sdcc1bam: dma@12402000 {
+ compatible = "qcom,bam-v1.3.0";
+ reg = <0x12402000 0x8000>;
+ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc SDC1_H_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ sdcc3bam: dma@12182000 {
+ compatible = "qcom,bam-v1.3.0";
+ reg = <0x12182000 0x8000>;
+ interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc SDC3_H_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ sdcc4bam: dma@121c2000 {
+ compatible = "qcom,bam-v1.3.0";
+ reg = <0x121c2000 0x8000>;
+ interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc SDC4_H_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ amba {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ sdcc1: sdcc@12400000 {
+ status = "disabled";
+ compatible = "arm,pl18x", "arm,primecell";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdcc1_pins>;
+ arm,primecell-periphid = <0x00051180>;
+ reg = <0x12400000 0x2000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC1_CLK>, <&gcc SDC1_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <8>;
+ max-frequency = <96000000>;
+ non-removable;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ dmas = <&sdcc1bam 2>, <&sdcc1bam 1>;
+ dma-names = "tx", "rx";
+ };
+
+ sdcc3: sdcc@12180000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00051180>;
+ status = "disabled";
+ reg = <0x12180000 0x2000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC3_CLK>, <&gcc SDC3_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <192000000>;
+ no-1-8-v;
+ dmas = <&sdcc3bam 2>, <&sdcc3bam 1>;
+ dma-names = "tx", "rx";
+ };
+
+ sdcc4: sdcc@121c0000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00051180>;
+ status = "disabled";
+ reg = <0x121c0000 0x2000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC4_CLK>, <&gcc SDC4_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <48000000>;
+ dmas = <&sdcc4bam 2>, <&sdcc4bam 1>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdc4_gpios>;
+ };
+ };
+
+ adm: dma@18320000 {
+ compatible = "qcom,adm";
+ reg = <0x18320000 0xE0000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_NONE>;
+ #dma-cells = <1>;
+
+ clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
+ clock-names = "core", "iface";
+
+ resets = <&gcc ADM0_RESET>,
+ <&gcc ADM0_PBUS_RESET>,
+ <&gcc ADM0_C0_RESET>,
+ <&gcc ADM0_C1_RESET>,
+ <&gcc ADM0_C2_RESET>;
+ reset-names = "clk", "pbus", "c0", "c1", "c2";
+ qcom,ee = <1>;
+
+ status = "disabled";
+ };
+
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-apq8064", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
+
+ gpu: adreno-3xx@4300000 {
+ compatible = "qcom,adreno-3xx";
+ reg = <0x04300000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_clk",
+ "mem_iface_clk";
+ clocks =
+ <&mmcc GFX3D_CLK>,
+ <&mmcc GFX3D_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>,
+ <&mmcc MMSS_IMEM_AHB_CLK>;
+ qcom,chipid = <0x03020002>;
+
+ iommus = <&gfx3d 0
+ &gfx3d 1
+ &gfx3d 2
+ &gfx3d 3
+ &gfx3d 4
+ &gfx3d 5
+ &gfx3d 6
+ &gfx3d 7
+ &gfx3d 8
+ &gfx3d 9
+ &gfx3d 10
+ &gfx3d 11
+ &gfx3d 12
+ &gfx3d 13
+ &gfx3d 14
+ &gfx3d 15
+ &gfx3d 16
+ &gfx3d 17
+ &gfx3d 18
+ &gfx3d 19
+ &gfx3d 20
+ &gfx3d 21
+ &gfx3d 22
+ &gfx3d 23
+ &gfx3d 24
+ &gfx3d 25
+ &gfx3d 26
+ &gfx3d 27
+ &gfx3d 28
+ &gfx3d 29
+ &gfx3d 30
+ &gfx3d 31
+ &gfx3d1 0
+ &gfx3d1 1
+ &gfx3d1 2
+ &gfx3d1 3
+ &gfx3d1 4
+ &gfx3d1 5
+ &gfx3d1 6
+ &gfx3d1 7
+ &gfx3d1 8
+ &gfx3d1 9
+ &gfx3d1 10
+ &gfx3d1 11
+ &gfx3d1 12
+ &gfx3d1 13
+ &gfx3d1 14
+ &gfx3d1 15
+ &gfx3d1 16
+ &gfx3d1 17
+ &gfx3d1 18
+ &gfx3d1 19
+ &gfx3d1 20
+ &gfx3d1 21
+ &gfx3d1 22
+ &gfx3d1 23
+ &gfx3d1 24
+ &gfx3d1 25
+ &gfx3d1 26
+ &gfx3d1 27
+ &gfx3d1 28
+ &gfx3d1 29
+ &gfx3d1 30
+ &gfx3d1 31>;
+
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <450000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+ };
+
+ mmss_sfpb: syscon@5700000 {
+ compatible = "syscon";
+ reg = <0x5700000 0x70>;
+ };
+
+ dsi0: mdss_dsi@4700000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ label = "MDSS DSI CTRL->0";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x04700000 0x200>;
+ reg-names = "dsi_ctrl";
+
+ clocks = <&mmcc DSI_M_AHB_CLK>,
+ <&mmcc DSI_S_AHB_CLK>,
+ <&mmcc AMP_AHB_CLK>,
+ <&mmcc DSI_CLK>,
+ <&mmcc DSI1_BYTE_CLK>,
+ <&mmcc DSI_PIXEL_CLK>,
+ <&mmcc DSI1_ESC_CLK>;
+ clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+ "src_clk", "byte_clk", "pixel_clk",
+ "core_clk";
+
+ assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+ <&mmcc DSI1_ESC_SRC>,
+ <&mmcc DSI_SRC>,
+ <&mmcc DSI_PIXEL_SRC>;
+ assigned-clock-parents = <&dsi0_phy 0>,
+ <&dsi0_phy 0>,
+ <&dsi0_phy 1>,
+ <&dsi0_phy 1>;
+ syscon-sfpb = <&mmss_sfpb>;
+ phys = <&dsi0_phy>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+
+ dsi0_phy: dsi-phy@4700200 {
+ compatible = "qcom,dsi-phy-28nm-8960";
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ reg = <0x04700200 0x100>,
+ <0x04700300 0x200>,
+ <0x04700500 0x5c>;
+ reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
+ clock-names = "iface_clk";
+ clocks = <&mmcc DSI_M_AHB_CLK>;
+ };
+
+
+ mdp_port0: iommu@7500000 {
+ compatible = "qcom,apq8064-iommu";
+ #iommu-cells = <1>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ncb = <2>;
+ };
+
+ mdp_port1: iommu@7600000 {
+ compatible = "qcom,apq8064-iommu";
+ #iommu-cells = <1>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07600000 0x100000>;
+ interrupts =
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ncb = <2>;
+ };
+
+ gfx3d: iommu@7c00000 {
+ compatible = "qcom,apq8064-iommu";
+ #iommu-cells = <1>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07c00000 0x100000>;
+ interrupts =
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ncb = <3>;
+ };
+
+ gfx3d1: iommu@7d00000 {
+ compatible = "qcom,apq8064-iommu";
+ #iommu-cells = <1>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07d00000 0x100000>;
+ interrupts =
+ <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ncb = <3>;
+ };
+
+ pcie: pci@1b500000 {
+ compatible = "qcom,pcie-apq8064", "snps,dw-pcie";
+ reg = <0x1b500000 0x1000
+ 0x1b502000 0x80
+ 0x1b600000 0x100
+ 0x0ff00000 0x100000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0 0x0fe00000 0 0x00100000 /* I/O */
+<<<<<<<
+ 0x82000000 0 0 0x08000000 0 0x07e00000>; /* memory */
+=======
+ 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* memory */
+>>>>>>>
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 36 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 37 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 38 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 39 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ clocks = <&gcc PCIE_A_CLK>,
+ <&gcc PCIE_H_CLK>,
+ <&gcc PCIE_PHY_REF_CLK>;
+ clock-names = "core", "iface", "phy";
+ resets = <&gcc PCIE_ACLK_RESET>,
+ <&gcc PCIE_HCLK_RESET>,
+ <&gcc PCIE_POR_RESET>,
+ <&gcc PCIE_PCI_RESET>,
+ <&gcc PCIE_PHY_RESET>;
+ reset-names = "axi", "ahb", "por", "pci", "phy";
+ status = "disabled";
+ };
+
+ pil_q6v4: pil@28800000 {
+ compatible = "qcom,tz-pil", "qcom,apq8064-tz-pil";
+ qcom,firmware-name = "q6";
+ reg = <0x28800000 0x100>;
+ reg-names = "qdsp6_base";
+ qcom,pas-id = <1>; /* PAS_Q6 */
+
+
+ smd-edge {
+ interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
+ label = "lpass";
+ qcom,ipc = <&l2cc 8 15>;
+ qcom,smd-edge = <1>;
+
+ status = "okay";
+
+ };
+
+ };
+
+ hdmi: hdmi-tx@4a00000 {
+ compatible = "qcom,hdmi-tx-8960";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pinctrl>;
+ reg = <0x04a00000 0x2f0>;
+ reg-names = "core_physical";
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mmcc HDMI_APP_CLK>,
+ <&mmcc HDMI_M_AHB_CLK>,
+ <&mmcc HDMI_S_AHB_CLK>;
+ clock-names = "core_clk",
+ "master_iface_clk",
+ "slave_iface_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi-phy";
+ #sound-dai-cells = <1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ hdmi_out: endpoint {
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@4a00400 {
+ compatible = "qcom,hdmi-phy-8960";
+ reg = <0x4a00400 0x60>,
+ <0x4a00500 0x100>;
+ reg-names = "hdmi_phy",
+ "hdmi_pll";
+
+ clocks = <&mmcc HDMI_S_AHB_CLK>;
+ clock-names = "slave_iface_clk";
+ #phy-cells = <0>;
+ };
+
+ mdp: mdp@5100000 {
+ compatible = "qcom,mdp4";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mmcc MDP_CLK>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>;
+ clock-names = "core_clk",
+ "iface_clk",
+ "bus_clk",
+ "lut_clk",
+ "hdmi_clk",
+ "tv_clk";
+
+ iommus = <&mdp_port0 0
+ &mdp_port0 2
+ &mdp_port1 0
+ &mdp_port1 2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp_lvds_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdp_dsi1_out: endpoint {
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ mdp_dsi2_out: endpoint {
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ mdp_dtv_out: endpoint {
+ };
+ };
+ };
+ };
+
+
+
+ riva: riva-pil@3204000 {
+ compatible = "qcom,riva-pil";
+
+ reg = <0x03200800 0x1000>, <0x03202000 0x2000>, <0x03204000 0x100>;
+ reg-names = "ccu", "dxe", "pmu";
+
+ interrupts-extended = <&intc GIC_SPI 199 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smsm 6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal";
+
+ memory-region = <&wcnss_mem>;
+
+ vddcx-supply = <&pm8921_s3>;
+ vddmx-supply = <&pm8921_l24>;
+ vddpx-supply = <&pm8921_s4>;
+
+ status = "disabled";
+
+ iris {
+ compatible = "qcom,wcn3660";
+
+ clocks = <&cxo_board>;
+ clock-names = "xo";
+
+ vddxo-supply = <&pm8921_l4>;
+ vddrfa-supply = <&pm8921_s2>;
+ vddpa-supply = <&pm8921_l10>;
+ vdddig-supply = <&pm8921_lvs2>;
+ };
+
+ smd-edge {
+ interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 25>;
+ qcom,smd-edge = <6>;
+
+ label = "riva";
+
+ wcnss {
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,mmio = <&riva>;
+
+ bt {
+ compatible = "qcom,wcnss-bt";
+ };
+
+ wifi {
+ compatible = "qcom,wcnss-wlan";
+
+ interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ qcom,smem-states = <&apps_smsm 10>, <&apps_smsm 9>;
+ qcom,smem-state-names = "tx-enable", "tx-rings-empty";
+ };
+ };
+ };
+ };
+
+ etb@1a01000 {
+ compatible = "coresight-etb10", "arm,primecell";
+ reg = <0x1a01000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out0>;
+ };
+ };
+ };
+
+ tpiu@1a03000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x1a03000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpiu_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out1>;
+ };
+ };
+ };
+
+ replicator {
+ compatible = "arm,coresight-replicator";
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ replicator_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out>;
+ };
+ };
+ };
+ };
+
+ funnel@1a04000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x1a04000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Not described input ports:
+ * 2 - connected to STM component
+ * 3 - not-connected
+ * 6 - not-connected
+ * 7 - not-connected
+ */
+ port@0 {
+ reg = <0>;
+ funnel_in0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ funnel_in4: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+ port@5 {
+ reg = <5>;
+ funnel_in5: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ funnel_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ };
+ };
+
+ etm@1a1c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1c000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU0>;
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel_in0>;
+ };
+ };
+ };
+
+ etm@1a1d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1d000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU1>;
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel_in1>;
+ };
+ };
+ };
+
+ etm@1a1e000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1e000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU2>;
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel_in4>;
+ };
+ };
+ };
+
+ etm@1a1f000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1f000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU3>;
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel_in5>;
+ };
+ };
+ };
+ };
+};
+#include "qcom-apq8064-pins.dtsi"
diff --git a/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/postimage b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/postimage
new file mode 100644
index 0000000..bcc612c
--- /dev/null
+++ b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/postimage
@@ -0,0 +1,84 @@
+Qualcomm APR (Asynchronous Packet Router) binding
+
+This binding describes the Qualcomm APR. APR is an IPC protocol for
+communication between the application processor and the QDSP. APR is mainly
+used for audio/voice services on the QDSP.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apr-v<VERSION-NUMBER>", example "qcom,apr-v2"
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Destination processor ID.
+ Possible values are :
+ 1 - APR simulator
+ 2 - PC
+ 3 - MODEM
+ 4 - ADSP
+ 5 - APPS
+ 6 - MODEM2
+ 7 - APPS2
+
+= APR SERVICES
+Each subnode of the APR node represents a service tied to this apr. The names
+of the nodes are not important. The properties of these nodes are defined
+by the individual bindings for the specific service
+- All APR services MUST contain the following property:
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: APR Service ID
+ Possible values are :
+ 3 - DSP Core Service
+ 4 - Audio Front End Service.
+ 5 - Voice Stream Manager Service.
+ 6 - Voice processing manager.
+ 7 - Audio Stream Manager Service.
+ 8 - Audio Device Manager Service.
+ 9 - Multimode voice manager.
+ 10 - Core voice stream.
+ 11 - Core voice processor.
+ 12 - Ultrasound stream manager.
+ 13 - Listen stream manager.
+
+= EXAMPLE
+The following example represents a QDSP-based sound card on an MSM8996 device
+which uses APR for communication between the Apps processor and the QDSP.
+
+ apr@4 {
+ compatible = "qcom,apr-v2";
+ reg = <APR_DOMAIN_ADSP>;
+
+ q6core@3 {
+ compatible = "qcom,q6core";
+ reg = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ hdmi@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ ...
+ };
+
+ q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ ...
+ };
+ };
diff --git a/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/preimage b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/preimage
new file mode 100644
index 0000000..3ba862d
--- /dev/null
+++ b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/preimage
@@ -0,0 +1,137 @@
+Qualcomm APR (Asynchronous Packet Router) binding
+
+This binding describes the Qualcomm APR. APR is an IPC protocol for
+communication between the application processor and the QDSP. APR is mainly
+used for audio/voice services on the QDSP.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apr-v<VERSION-NUMBER>", example "qcom,apr-v2"
+
+<<<<<<<
+- qcom,apr-dest-domain-id
+ Usage: required
+ Value type: <prop-encoded-array>
+=======
+- reg
+ Usage: required
+ Value type: <u32>
+>>>>>>>
+ Definition: Destination processor ID.
+ Possible values are :
+ 1 - APR simulator
+ 2 - PC
+ 3 - MODEM
+ 4 - ADSP
+ 5 - APPS
+ 6 - MODEM2
+ 7 - APPS2
+
+= APR SERVICES
+<<<<<<<
+Each subnode of the APR node can represent a service tied to this apr. The names
+of the nodes are not important. The properties of these nodes are defined
+by the individual bindings for the specific service
+- but must contain the following property:
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: APR Service ID, used for matching the service.
+=======
+Each subnode of the APR node represents a service tied to this apr. The names
+of the nodes are not important. The properties of these nodes are defined
+by the individual bindings for the specific service
+- All APR services MUST contain the following property:
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: APR Service ID
+>>>>>>>
+ Possible values are :
+ 3 - DSP Core Service
+ 4 - Audio Front End Service.
+ 5 - Voice Stream Manager Service.
+ 6 - Voice processing manager.
+ 7 - Audio Stream Manager Service.
+ 8 - Audio Device Manager Service.
+ 9 - Multimode voice manager.
+ 10 - Core voice stream.
+ 11 - Core voice processor.
+ 12 - Ultrasound stream manager.
+ 13 - Listen stream manager.
+
+<<<<<<<
+=======
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: User-readable name of an APR service.
+
+= APR DEVICES:
+Each subnode of the APR node can represent devices tied to this apr, like
+sound-card. The properties of these nodes are defined by the individual
+bindings for the specific device.
+
+>>>>>>>
+= EXAMPLE
+The following example represents a QDSP-based sound card on an MSM8996 device
+which uses APR for communication between the Apps processor and the QDSP.
+
+<<<<<<<
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ compatible = "qcom,q6core";
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ };
+
+ audio {
+ compatible = "qcom,msm8996-snd-card";
+=======
+ apr@4 {
+ compatible = "qcom,apr-v2";
+ reg = <APR_DOMAIN_ADSP>;
+
+ q6core@3 {
+ compatible = "qcom,q6core";
+ reg = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ hdmi@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ ...
+ };
+
+ q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+>>>>>>>
+ ...
+ };
+ };
diff --git a/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/thisimage b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/thisimage
new file mode 100644
index 0000000..3ba862d
--- /dev/null
+++ b/rr-cache/4dee8d4658e5f1a7754d6fdf7f6ac71c18f63172/thisimage
@@ -0,0 +1,137 @@
+Qualcomm APR (Asynchronous Packet Router) binding
+
+This binding describes the Qualcomm APR. APR is an IPC protocol for
+communication between the application processor and the QDSP. APR is mainly
+used for audio/voice services on the QDSP.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apr-v<VERSION-NUMBER>", example "qcom,apr-v2"
+
+<<<<<<<
+- qcom,apr-dest-domain-id
+ Usage: required
+ Value type: <prop-encoded-array>
+=======
+- reg
+ Usage: required
+ Value type: <u32>
+>>>>>>>
+ Definition: Destination processor ID.
+ Possible values are :
+ 1 - APR simulator
+ 2 - PC
+ 3 - MODEM
+ 4 - ADSP
+ 5 - APPS
+ 6 - MODEM2
+ 7 - APPS2
+
+= APR SERVICES
+<<<<<<<
+Each subnode of the APR node can represent a service tied to this apr. The names
+of the nodes are not important. The properties of these nodes are defined
+by the individual bindings for the specific service
+- but must contain the following property:
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: APR Service ID, used for matching the service.
+=======
+Each subnode of the APR node represents a service tied to this apr. The names
+of the nodes are not important. The properties of these nodes are defined
+by the individual bindings for the specific service
+- All APR services MUST contain the following property:
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: APR Service ID
+>>>>>>>
+ Possible values are :
+ 3 - DSP Core Service
+ 4 - Audio Front End Service.
+ 5 - Voice Stream Manager Service.
+ 6 - Voice processing manager.
+ 7 - Audio Stream Manager Service.
+ 8 - Audio Device Manager Service.
+ 9 - Multimode voice manager.
+ 10 - Core voice stream.
+ 11 - Core voice processor.
+ 12 - Ultrasound stream manager.
+ 13 - Listen stream manager.
+
+<<<<<<<
+=======
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: User-readable name of an APR service.
+
+= APR DEVICES:
+Each subnode of the APR node can represent devices tied to this apr, like
+sound-card. The properties of these nodes are defined by the individual
+bindings for the specific device.
+
+>>>>>>>
+= EXAMPLE
+The following example represents a QDSP-based sound card on an MSM8996 device
+which uses APR for communication between the Apps processor and the QDSP.
+
+<<<<<<<
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ compatible = "qcom,q6core";
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ };
+
+ audio {
+ compatible = "qcom,msm8996-snd-card";
+=======
+ apr@4 {
+ compatible = "qcom,apr-v2";
+ reg = <APR_DOMAIN_ADSP>;
+
+ q6core@3 {
+ compatible = "qcom,q6core";
+ reg = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ hdmi@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ q6asm@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ ...
+ };
+
+ q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+>>>>>>>
+ ...
+ };
+ };
diff --git a/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/postimage b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/postimage
new file mode 100644
index 0000000..0063624
--- /dev/null
+++ b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/postimage
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_QCOM_APR_H
+#define __DT_BINDINGS_QCOM_APR_H
+
+/* Domain IDs */
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+
+/* ADSP service IDs */
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_LSM 0x0D
+#define APR_SVC_VIDC 0x16
+#define APR_SVC_MAX 0x17
+
+#endif /* __DT_BINDINGS_QCOM_APR_H */
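The header above gives symbolic names to the numeric domain and service IDs that the APR binding earlier in this series lists by value. A minimal sketch of how a consumer DTS might use them follows; it is not part of the diff, the include path is an assumption based on the usual dt-bindings layout (the rr-cache paths do not show where the header is installed), and only the constant values come from the header itself.

    #include <dt-bindings/soc/qcom,apr.h>   /* assumed install path */

    apr@4 {
        compatible = "qcom,apr-v2";
        reg = <APR_DOMAIN_ADSP>;            /* expands to <0x4> */

        q6afe@4 {
            compatible = "qcom,q6afe";
            reg = <APR_SVC_AFE>;            /* expands to <0x4> */
        };
    };

The unit-addresses mirror the resolved constant values, matching the apr@4/q6afe@4 form used in the binding example.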
diff --git a/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/preimage b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/preimage
new file mode 100644
index 0000000..074bc39
--- /dev/null
+++ b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/preimage
@@ -0,0 +1,40 @@
+<<<<<<<
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+>>>>>>>
+#ifndef __DT_BINDINGS_QCOM_APR_H
+#define __DT_BINDINGS_QCOM_APR_H
+
+/* Domain IDs */
+<<<<<<<
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+=======
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+>>>>>>>
+
+/* ADSP service IDs */
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_LSM 0x0D
+#define APR_SVC_VIDC 0x16
+#define APR_SVC_MAX 0x17
+
+#endif /* __DT_BINDINGS_QCOM_APR_H */
diff --git a/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/thisimage b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/thisimage
new file mode 100644
index 0000000..074bc39
--- /dev/null
+++ b/rr-cache/50fb8c9f6de7874da63dc84eeefb2014c9123899/thisimage
@@ -0,0 +1,40 @@
+<<<<<<<
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+>>>>>>>
+#ifndef __DT_BINDINGS_QCOM_APR_H
+#define __DT_BINDINGS_QCOM_APR_H
+
+/* Domain IDs */
+<<<<<<<
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+=======
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+>>>>>>>
+
+/* ADSP service IDs */
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_LSM 0x0D
+#define APR_SVC_VIDC 0x16
+#define APR_SVC_MAX 0x17
+
+#endif /* __DT_BINDINGS_QCOM_APR_H */
diff --git a/rr-cache/511ee021ca21c4482bc2820278820cb282866ece/preimage b/rr-cache/511ee021ca21c4482bc2820278820cb282866ece/preimage
new file mode 100644
index 0000000..b9d89c6
--- /dev/null
+++ b/rr-cache/511ee021ca21c4482bc2820278820cb282866ece/preimage
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
+<<<<<<<
+=======
+dtb-$(CONFIG_ARCH_QCOM) += msm8998-mtp.dtb
+>>>>>>>
+dtb-$(CONFIG_ARCH_QCOM) += sdm845-mtp.dtb
diff --git a/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/postimage b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/postimage
new file mode 100644
index 0000000..561cd42
--- /dev/null
+++ b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/postimage
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/soc/qcom/apr.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+
+static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ return 0;
+}
+
+static int apq8096_sbc_parse_of(struct snd_soc_card *card)
+{
+ struct device_node *np;
+ struct device_node *codec = NULL;
+ struct device_node *platform = NULL;
+ struct device_node *cpu = NULL;
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ int ret, num_links;
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret) {
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+ return ret;
+ }
+
+ /* DAPM routes */
+ if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+ if (ret)
+ return ret;
+ }
+
+ /* Populate links */
+ num_links = of_get_child_count(dev->of_node);
+
+ /* Allocate the DAI link array */
+ card->dai_link = kcalloc(num_links, sizeof(*link), GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ card->num_links = num_links;
+ link = card->dai_link;
+
+ for_each_child_of_node(dev->of_node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!link->cpu_of_node) {
+ dev_err(card->dev, "error getting cpu phandle\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
+ if (ret) {
+ dev_err(card->dev, "error getting cpu dai name\n");
+ goto err;
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+ if (codec && platform) {
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai",
+ 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "platform dai not found\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+ if (ret < 0) {
+ dev_err(card->dev, "codec dai not found\n");
+ goto err;
+ }
+ link->no_pcm = 1;
+ link->ignore_pmdown_time = 1;
+ link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
+ } else {
+ link->platform_of_node = link->cpu_of_node;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+ link->dynamic = 1;
+ }
+
+ link->ignore_suspend = 1;
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ goto err;
+ }
+
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+ link->stream_name = link->name;
+ link++;
+ }
+
+ return 0;
+err:
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+ kfree(card->dai_link);
+ return ret;
+}
+
+static int apq8096_bind(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ component_bind_all(dev, card);
+ card->dev = dev;
+ ret = apq8096_sbc_parse_of(card);
+ if (ret) {
+ dev_err(dev, "Error parsing OF data\n");
+ goto err;
+ }
+
+ ret = snd_soc_register_card(card);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ component_unbind_all(dev, card);
+ kfree(card);
+ return ret;
+}
+
+static void apq8096_unbind(struct device *dev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(dev);
+
+ snd_soc_unregister_card(card);
+ component_unbind_all(dev, card);
+ kfree(card->dai_link);
+ kfree(card);
+}
+
+static const struct component_master_ops apq8096_ops = {
+ .bind = apq8096_bind,
+ .unbind = apq8096_unbind,
+};
+
+static int apq8016_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static void apq8016_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
+static int add_audio_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np, *platform, *cpu, *node, *dai_node;
+
+ node = dev->of_node;
+
+ for_each_child_of_node(node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (cpu) {
+ dai_node = of_parse_phandle(cpu, "sound-dai", 0);
+ of_node_get(dai_node);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ if (platform) {
+ dai_node = of_parse_phandle(platform, "sound-dai", 0);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+ }
+
+ return 0;
+}
+
+static int apq8096_platform_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = add_audio_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev, &apq8096_ops, match);
+}
+
+static int apq8096_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &apq8096_ops);
+
+ return 0;
+}
+
+static const struct of_device_id msm_snd_apq8096_dt_match[] = {
+ {.compatible = "qcom,apq8096-sndcard"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_snd_apq8096_dt_match);
+
+static struct platform_driver msm_snd_apq8096_driver = {
+ .probe = apq8096_platform_probe,
+ .remove = apq8096_platform_remove,
+ .driver = {
+ .name = "msm-snd-apq8096",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snd_apq8096_dt_match,
+ },
+};
+module_platform_driver(msm_snd_apq8096_driver);
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("APQ8096 ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
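The resolved machine driver above binds against a sound-card node matching "qcom,apq8096-sndcard", reads "qcom,model" and an optional "qcom,audio-routing" from it, and then treats every child node as a DAI link: each child must carry a "link-name" and a "cpu" subnode whose "sound-dai" phandle is resolved, while optional "platform" and "codec" subnodes turn the link into a back end (otherwise a dummy-codec front end is created). A rough DT sketch of such a card follows; the node names, labels and the referenced DAI providers are illustrative assumptions, only the compatible string and property names come from the code above.

    sound {
        compatible = "qcom,apq8096-sndcard";
        qcom,model = "DB820c";                  /* illustrative card name */

        /* front-end link: cpu only, codec defaults to snd-soc-dummy */
        mm1-dai-link {
            link-name = "MultiMedia1";
            cpu {
                sound-dai = <&q6asmdai 0>;      /* assumed DAI provider */
            };
        };

        /* back-end link: platform + codec present, 48 kHz/stereo fixup applies */
        hdmi-dai-link {
            link-name = "HDMI";
            cpu {
                sound-dai = <&q6afedai 1>;      /* assumed DAI provider */
            };
            platform {
                sound-dai = <&q6routing>;       /* assumed */
            };
            codec {
                sound-dai = <&hdmi 0>;          /* assumed */
            };
        };
    };

Note that add_audio_components() only registers component matches for the cpu and platform phandles, so the card does not probe until those DAI providers have bound.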
diff --git a/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/preimage b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/preimage
new file mode 100644
index 0000000..614c0be
--- /dev/null
+++ b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/preimage
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/soc/qcom/apr.h>
+#include <linux/module.h>
+=======
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/soc/qcom/apr.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+>>>>>>>
+#include <linux/of_device.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+
+<<<<<<<
+static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+=======
+static int msm8996_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+>>>>>>>
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ return 0;
+}
+
+static int apq8096_sbc_parse_of(struct snd_soc_card *card)
+{
+<<<<<<<
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ struct device_node *np, *codec, *platform, *cpu, *node = dev->of_node;
+ int ret, num_links;
+ bool is_fe;
+
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret)
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+
+ if (of_property_read_bool(dev->of_node, "qcom,audio-routing"))
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+
+ /* Populate links */
+ num_links = of_get_child_count(node);
+
+ dev_info(dev, "Found %d child audio dai links..\n", num_links);
+ /* Allocate the private data and the DAI link array */
+ card->dai_link = devm_kzalloc(dev, sizeof(*link) * num_links,
+ GFP_KERNEL);
+=======
+ struct device_node *np;
+ struct device_node *codec = NULL;
+ struct device_node *platform = NULL;
+ struct device_node *cpu = NULL;
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ int ret, num_links;
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret) {
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+ return ret;
+ }
+
+ /* DAPM routes */
+ if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+ if (ret)
+ return ret;
+ }
+
+ /* Populate links */
+ num_links = of_get_child_count(dev->of_node);
+
+ /* Allocate the DAI link array */
+ card->dai_link = kcalloc(num_links, sizeof(*link), GFP_KERNEL);
+>>>>>>>
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ card->num_links = num_links;
+<<<<<<<
+ link = card->dai_link;
+
+ for_each_child_of_node(dev->of_node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ ret = -EINVAL;
+ goto err;
+=======
+
+ link = card->dai_link;
+
+ for_each_child_of_node(node, np) {
+ is_fe = false;
+ if (of_property_read_bool(np, "is-fe"))
+ is_fe = true;
+
+ if (is_fe) {
+ /* BE is dummy */
+ link->codec_of_node = NULL;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+
+ /* FE settings */
+ link->dynamic = 1;
+ link->dpcm_playback = 1;
+
+ } else {
+ link->no_pcm = 1;
+ link->dpcm_playback = 1;
+ link->ignore_suspend = 1;
+ link->ignore_pmdown_time = 1;
+ link->be_hw_params_fixup = msm8996_be_hw_params_fixup;
+ }
+
+ cpu = of_get_child_by_name(np, "cpu");
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ return -EINVAL;
+>>>>>>>
+ }
+
+ link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!link->cpu_of_node) {
+ dev_err(card->dev, "error getting cpu phandle\n");
+<<<<<<<
+ ret = -EINVAL;
+ goto err;
+=======
+ return -EINVAL;
+ }
+
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai", 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "error getting platform phandle\n");
+ return -EINVAL;
+>>>>>>>
+ }
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
+ if (ret) {
+ dev_err(card->dev, "error getting cpu dai name\n");
+<<<<<<<
+ goto err;
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+ if (codec && platform) {
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai",
+ 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "platform dai not found\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+ if (ret < 0) {
+ dev_err(card->dev, "codec dai not found\n");
+ goto err;
+ }
+ link->no_pcm = 1;
+ link->ignore_pmdown_time = 1;
+ link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
+ } else {
+ link->platform_of_node = link->cpu_of_node;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+ link->dynamic = 1;
+ }
+
+ link->ignore_suspend = 1;
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ goto err;
+ }
+
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+=======
+ return ret;
+ }
+
+ if (codec) {
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+
+ if (ret < 0) {
+ dev_err(card->dev, "error getting codec dai name\n");
+ return ret;
+ }
+ }
+
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ return ret;
+ }
+
+>>>>>>>
+ link->stream_name = link->name;
+ link++;
+ }
+
+<<<<<<<
+ return 0;
+err:
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+ kfree(card->dai_link);
+ return ret;
+}
+
+static int apq8096_bind(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ component_bind_all(dev, card);
+ card->dev = dev;
+ ret = apq8096_sbc_parse_of(card);
+ if (ret) {
+ dev_err(dev, "Error parsing OF data\n");
+ goto err;
+ }
+
+ ret = snd_soc_register_card(card);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ component_unbind_all(dev, card);
+ kfree(card);
+ return ret;
+}
+
+static void apq8096_unbind(struct device *dev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(dev);
+
+ snd_soc_unregister_card(card);
+ component_unbind_all(dev, card);
+ kfree(card->dai_link);
+ kfree(card);
+}
+
+static const struct component_master_ops apq8096_ops = {
+ .bind = apq8096_bind,
+ .unbind = apq8096_unbind,
+};
+
+static int apq8016_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static void apq8016_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
+static int add_audio_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np, *platform, *cpu, *node, *dai_node;
+
+ node = dev->of_node;
+
+ for_each_child_of_node(node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (cpu) {
+ dai_node = of_parse_phandle(cpu, "sound-dai", 0);
+ of_node_get(dai_node);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ if (platform) {
+ dai_node = of_parse_phandle(platform, "sound-dai", 0);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+ }
+
+ return 0;
+}
+
+static int apq8096_platform_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = add_audio_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev, &apq8096_ops, match);
+}
+
+static int apq8096_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &apq8096_ops);
+
+ return 0;
+}
+
+=======
+ return ret;
+}
+
+static int msm_snd_apq8096_probe(struct apr_device *adev)
+{
+ int ret;
+ struct snd_soc_card *card;
+
+ card = devm_kzalloc(&adev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = &adev->dev;
+
+ ret = apq8096_sbc_parse_of(card);
+ if (ret) {
+ dev_err(&adev->dev, "Error parsing OF data\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_card(&adev->dev, card);
+ if (ret)
+ dev_err(&adev->dev, "sound card register failed (%d)!\n", ret);
+ else
+ dev_err(&adev->dev, "sound card registered successfully\n");
+
+ return ret;
+}
+
+>>>>>>>
+static const struct of_device_id msm_snd_apq8096_dt_match[] = {
+ {.compatible = "qcom,apq8096-sndcard"},
+ {}
+};
+
+<<<<<<<
+MODULE_DEVICE_TABLE(of, msm_snd_apq8096_dt_match);
+
+static struct platform_driver msm_snd_apq8096_driver = {
+ .probe = apq8096_platform_probe,
+ .remove = apq8096_platform_remove,
+=======
+static struct apr_driver msm_snd_apq8096_driver = {
+ .probe = msm_snd_apq8096_probe,
+>>>>>>>
+ .driver = {
+ .name = "msm-snd-apq8096",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snd_apq8096_dt_match,
+ },
+};
+<<<<<<<
+module_apr_driver(msm_snd_apq8096_driver);
+=======
+module_platform_driver(msm_snd_apq8096_driver);
+>>>>>>>
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("APQ8096 ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/thisimage b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/thisimage
new file mode 100644
index 0000000..614c0be
--- /dev/null
+++ b/rr-cache/521b913e8a5b94999eff25f480e11e42f1488f77/thisimage
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/soc/qcom/apr.h>
+#include <linux/module.h>
+=======
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/soc/qcom/apr.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+>>>>>>>
+#include <linux/of_device.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+
+<<<<<<<
+static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+=======
+static int msm8996_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+>>>>>>>
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ return 0;
+}
+
+static int apq8096_sbc_parse_of(struct snd_soc_card *card)
+{
+<<<<<<<
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ struct device_node *np, *codec, *platform, *cpu, *node = dev->of_node;
+ int ret, num_links;
+ bool is_fe;
+
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret)
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+
+ if (of_property_read_bool(dev->of_node, "qcom,audio-routing"))
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+
+ /* Populate links */
+ num_links = of_get_child_count(node);
+
+ dev_info(dev, "Found %d child audio dai links..\n", num_links);
+ /* Allocate the private data and the DAI link array */
+ card->dai_link = devm_kzalloc(dev, sizeof(*link) * num_links,
+ GFP_KERNEL);
+=======
+ struct device_node *np;
+ struct device_node *codec = NULL;
+ struct device_node *platform = NULL;
+ struct device_node *cpu = NULL;
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ int ret, num_links;
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret) {
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+ return ret;
+ }
+
+ /* DAPM routes */
+ if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+ if (ret)
+ return ret;
+ }
+
+ /* Populate links */
+ num_links = of_get_child_count(dev->of_node);
+
+ /* Allocate the DAI link array */
+ card->dai_link = kcalloc(num_links, sizeof(*link), GFP_KERNEL);
+>>>>>>>
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ card->num_links = num_links;
+<<<<<<<
+ link = card->dai_link;
+
+ for_each_child_of_node(dev->of_node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ ret = -EINVAL;
+ goto err;
+=======
+
+ link = card->dai_link;
+
+ for_each_child_of_node(node, np) {
+ is_fe = false;
+ if (of_property_read_bool(np, "is-fe"))
+ is_fe = true;
+
+ if (is_fe) {
+ /* BE is dummy */
+ link->codec_of_node = NULL;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+
+ /* FE settings */
+ link->dynamic = 1;
+ link->dpcm_playback = 1;
+
+ } else {
+ link->no_pcm = 1;
+ link->dpcm_playback = 1;
+ link->ignore_suspend = 1;
+ link->ignore_pmdown_time = 1;
+ link->be_hw_params_fixup = msm8996_be_hw_params_fixup;
+ }
+
+ cpu = of_get_child_by_name(np, "cpu");
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ return -EINVAL;
+>>>>>>>
+ }
+
+ link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!link->cpu_of_node) {
+ dev_err(card->dev, "error getting cpu phandle\n");
+<<<<<<<
+ ret = -EINVAL;
+ goto err;
+=======
+ return -EINVAL;
+ }
+
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai", 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "error getting platform phandle\n");
+ return -EINVAL;
+>>>>>>>
+ }
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
+ if (ret) {
+ dev_err(card->dev, "error getting cpu dai name\n");
+<<<<<<<
+ goto err;
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+ if (codec && platform) {
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai",
+ 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "platform dai not found\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+ if (ret < 0) {
+ dev_err(card->dev, "codec dai not found\n");
+ goto err;
+ }
+ link->no_pcm = 1;
+ link->ignore_pmdown_time = 1;
+ link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
+ } else {
+ link->platform_of_node = link->cpu_of_node;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+ link->dynamic = 1;
+ }
+
+ link->ignore_suspend = 1;
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ goto err;
+ }
+
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+=======
+ return ret;
+ }
+
+ if (codec) {
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+
+ if (ret < 0) {
+ dev_err(card->dev, "error getting codec dai name\n");
+ return ret;
+ }
+ }
+
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ return ret;
+ }
+
+>>>>>>>
+ link->stream_name = link->name;
+ link++;
+ }
+
+<<<<<<<
+ return 0;
+err:
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+ kfree(card->dai_link);
+ return ret;
+}
+
+static int apq8096_bind(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ component_bind_all(dev, card);
+ card->dev = dev;
+ ret = apq8096_sbc_parse_of(card);
+ if (ret) {
+ dev_err(dev, "Error parsing OF data\n");
+ goto err;
+ }
+
+ ret = snd_soc_register_card(card);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ component_unbind_all(dev, card);
+ kfree(card);
+ return ret;
+}
+
+static void apq8096_unbind(struct device *dev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(dev);
+
+ snd_soc_unregister_card(card);
+ component_unbind_all(dev, card);
+ kfree(card->dai_link);
+ kfree(card);
+}
+
+static const struct component_master_ops apq8096_ops = {
+ .bind = apq8096_bind,
+ .unbind = apq8096_unbind,
+};
+
+static int apq8016_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static void apq8016_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
+static int add_audio_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np, *platform, *cpu, *node, *dai_node;
+
+ node = dev->of_node;
+
+ for_each_child_of_node(node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (cpu) {
+ dai_node = of_parse_phandle(cpu, "sound-dai", 0);
+ of_node_get(dai_node);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ if (platform) {
+ dai_node = of_parse_phandle(platform, "sound-dai", 0);
+ component_match_add_release(dev, matchptr,
+ apq8016_release_of,
+ apq8016_compare_of,
+ dai_node);
+ }
+ }
+
+ return 0;
+}
+
+static int apq8096_platform_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = add_audio_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev, &apq8096_ops, match);
+}
+
+static int apq8096_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &apq8096_ops);
+
+ return 0;
+}
+
+=======
+ return ret;
+}
+
+static int msm_snd_apq8096_probe(struct apr_device *adev)
+{
+ int ret;
+ struct snd_soc_card *card;
+
+ card = devm_kzalloc(&adev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = &adev->dev;
+
+ ret = apq8096_sbc_parse_of(card);
+ if (ret) {
+ dev_err(&adev->dev, "Error parsing OF data\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_card(&adev->dev, card);
+ if (ret)
+ dev_err(&adev->dev, "sound card register failed (%d)!\n", ret);
+ else
+ dev_err(&adev->dev, "sound card registered successfully\n");
+
+ return ret;
+}
+
+>>>>>>>
+static const struct of_device_id msm_snd_apq8096_dt_match[] = {
+ {.compatible = "qcom,apq8096-sndcard"},
+ {}
+};
+
+<<<<<<<
+MODULE_DEVICE_TABLE(of, msm_snd_apq8096_dt_match);
+
+static struct platform_driver msm_snd_apq8096_driver = {
+ .probe = apq8096_platform_probe,
+ .remove = apq8096_platform_remove,
+=======
+static struct apr_driver msm_snd_apq8096_driver = {
+ .probe = msm_snd_apq8096_probe,
+>>>>>>>
+ .driver = {
+ .name = "msm-snd-apq8096",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snd_apq8096_dt_match,
+ },
+};
+<<<<<<<
+module_apr_driver(msm_snd_apq8096_driver);
+=======
+module_platform_driver(msm_snd_apq8096_driver);
+>>>>>>>
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("APQ8096 ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/postimage b/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/postimage
new file mode 100644
index 0000000..bb2e562
--- /dev/null
+++ b/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/postimage
@@ -0,0 +1,1351 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@75ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+ pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/preimage b/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/preimage
new file mode 100644
index 0000000..edb7ef6
--- /dev/null
+++ b/rr-cache/55079890805cf5c92a96f3ecef732cb5a510f73e/preimage
@@ -0,0 +1,1434 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@75ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+ pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+<<<<<<<
+=======
+
+ ufsphy1: ufsphy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ power-domains = <&gcc UFS_GDSC>;
+
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <0 265 0>;
+
+ phys = <&ufsphy1>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+>>>>>>>
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/56cd0ae6607c5a99bf05b02c665b3767b0c05110/preimage b/rr-cache/56cd0ae6607c5a99bf05b02c665b3767b0c05110/preimage
new file mode 100644
index 0000000..082f26c
--- /dev/null
+++ b/rr-cache/56cd0ae6607c5a99bf05b02c665b3767b0c05110/preimage
@@ -0,0 +1,281 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
+config QCOM_RPMCC
+ bool
+
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config QCOM_A53PLL
+ tristate "MSM8916 A53 PLL"
+ depends on COMMON_CLK_QCOM
+ default ARCH_QCOM
+ help
+ Support for the A53 PLL on MSM8916 devices. It provides
+ the CPU with frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on MSM8916
+ devices.
+
+config QCOM_CLK_APCS_MSM8916
+ tristate "MSM8916 APCS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ Support for the APCS Clock Controller on msm8916 devices. The
+ APCS manages the mux and divider which feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as msm8916.
+
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
+config APQ_GCC_8084
+ tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8084 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+ tristate "APQ8084 Multimedia Clock Controller"
+ select APQ_GCC_8084
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on apq8084 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_GCC_806X
+ tristate "IPQ806x Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq806x devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_LCC_806X
+ tristate "IPQ806x LPASS Clock Controller"
+ select IPQ_GCC_806X
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on ipq806x devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ S/PDIF, etc.
+
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
+config MSM_CLK_RPMH
+ tristate "RPMh Clock Driver"
+ depends on COMMON_CLK_QCOM && QCOM_RPMH
+ help
+ RPMh manages shared resources on some Qualcomm Technologies, Inc.
+ SoCs. It accepts requests from other hardware subsystems via RSC.
+ Say Y if you want to support the clocks exposed by RPMh on
+ platforms such as sdm845.
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI i2c, USB,
+ SD/eMMC, display, graphics, camera etc.
+
+config MSM_GCC_8960
+ tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_LCC_8960
+ tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+<<<<<<<
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc
+ sdm845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+=======
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+>>>>>>>
+
+config SPMI_PMIC_CLKDIV
+ tristate "SPMI PMIC clkdiv Support"
+ depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ help
+ This driver supports the clkdiv functionality on the Qualcomm
+ Technologies, Inc. SPMI PMIC. It configures the frequency of
+ clkdiv outputs of the PMIC. These clocks are typically wired
+ through alternate functions on GPIO pins.
+
+config MSM_APCC_8996
+ tristate "MSM8996 CPU Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the CPU clock controller on msm8996 devices.
+ Say Y if you want to support CPU clock scaling using CPUfreq
+ drivers for dynamic power management.
diff --git a/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/postimage b/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/postimage
new file mode 100644
index 0000000..cdcda58
--- /dev/null
+++ b/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/postimage
@@ -0,0 +1,1224 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/* UART specific GENI registers */
+#define SE_UART_TX_TRANS_CFG 0x25c
+#define SE_UART_TX_WORD_LEN 0x268
+#define SE_UART_TX_STOP_BIT_LEN 0x26c
+#define SE_UART_TX_TRANS_LEN 0x270
+#define SE_UART_RX_TRANS_CFG 0x280
+#define SE_UART_RX_WORD_LEN 0x28c
+#define SE_UART_RX_STALE_CNT 0x294
+#define SE_UART_TX_PARITY_CFG 0x2a4
+#define SE_UART_RX_PARITY_CFG 0x2a8
+
+/* SE_UART_TRANS_CFG */
+#define UART_TX_PAR_EN BIT(0)
+#define UART_CTS_MASK BIT(1)
+
+/* SE_UART_TX_WORD_LEN */
+#define TX_WORD_LEN_MSK GENMASK(9, 0)
+
+/* SE_UART_TX_STOP_BIT_LEN */
+#define TX_STOP_BIT_LEN_MSK GENMASK(23, 0)
+#define TX_STOP_BIT_LEN_1 0
+#define TX_STOP_BIT_LEN_1_5 1
+#define TX_STOP_BIT_LEN_2 2
+
+/* SE_UART_TX_TRANS_LEN */
+#define TX_TRANS_LEN_MSK GENMASK(23, 0)
+
+/* SE_UART_RX_TRANS_CFG */
+#define UART_RX_INS_STATUS_BIT BIT(2)
+#define UART_RX_PAR_EN BIT(3)
+
+/* SE_UART_RX_WORD_LEN */
+#define RX_WORD_LEN_MASK GENMASK(9, 0)
+
+/* SE_UART_RX_STALE_CNT */
+#define RX_STALE_CNT GENMASK(23, 0)
+
+/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */
+#define PAR_CALC_EN BIT(0)
+#define PAR_MODE_MSK GENMASK(2, 1)
+#define PAR_MODE_SHFT 1
+#define PAR_EVEN 0x00
+#define PAR_ODD 0x01
+#define PAR_SPACE 0x10
+#define PAR_MARK 0x11
+
+/* UART M_CMD OP codes */
+#define UART_START_TX 0x1
+#define UART_START_BREAK 0x4
+#define UART_STOP_BREAK 0x5
+/* UART S_CMD OP codes */
+#define UART_START_READ 0x1
+#define UART_PARAM 0x1
+
+#define UART_OVERSAMPLING 32
+#define STALE_TIMEOUT 16
+#define DEFAULT_BITS_PER_CHAR 10
+#define GENI_UART_CONS_PORTS 1
+#define DEF_FIFO_DEPTH_WORDS 16
+#define DEF_TX_WM 2
+#define DEF_FIFO_WIDTH_BITS 32
+#define UART_CONSOLE_RX_WM 2
+
+#ifdef CONFIG_CONSOLE_POLL
+#define RX_BYTES_PW 1
+#else
+#define RX_BYTES_PW 4
+#endif
+
+struct qcom_geni_serial_port {
+ struct uart_port uport;
+ struct geni_se se;
+ char name[20];
+ u32 tx_fifo_depth;
+ u32 tx_fifo_width;
+ u32 rx_fifo_depth;
+ u32 tx_wm;
+ u32 rx_wm;
+ u32 rx_rfr;
+ enum geni_se_xfer_mode xfer_mode;
+ bool setup;
+ int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ unsigned int baud;
+ unsigned int tx_bytes_pw;
+ unsigned int rx_bytes_pw;
+ bool brk;
+};
+
+static const struct uart_ops qcom_geni_console_pops;
+static struct uart_driver qcom_geni_console_driver;
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+static void qcom_geni_serial_stop_rx(struct uart_port *uport);
+
+static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000,
+ 96000000, 100000000};
+
+#define to_dev_port(ptr, member) \
+ container_of(ptr, struct qcom_geni_serial_port, member)
+
+static struct qcom_geni_serial_port qcom_geni_console_port = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_console_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+};
+
+static int qcom_geni_serial_request_port(struct uart_port *uport)
+{
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ uport->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(uport->membase))
+ return PTR_ERR(uport->membase);
+ port->se.base = uport->membase;
+ return 0;
+}
+
+static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
+{
+ if (cfg_flags & UART_CONFIG_TYPE) {
+ uport->type = PORT_MSM;
+ qcom_geni_serial_request_port(uport);
+ }
+}
+
+static unsigned int qcom_geni_cons_get_mctrl(struct uart_port *uport)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+static void qcom_geni_cons_set_mctrl(struct uart_port *uport,
+ unsigned int mctrl)
+{
+}
+
+static const char *qcom_geni_serial_get_type(struct uart_port *uport)
+{
+ return "MSM";
+}
+
+static struct qcom_geni_serial_port *get_port_from_line(int line)
+{
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return ERR_PTR(-ENXIO);
+ return &qcom_geni_console_port;
+}
+
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ int offset, int field, bool set)
+{
+ u32 reg;
+ struct qcom_geni_serial_port *port;
+ unsigned int baud;
+ unsigned int fifo_bits;
+ unsigned long timeout_us = 20000;
+
+ /* Ensure polling is not re-ordered before the prior writes/reads */
+ mb();
+
+ if (uport->private_data) {
+ port = to_dev_port(uport, uport);
+ baud = port->baud;
+ if (!baud)
+ baud = 115200;
+ fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+ /*
+ * Total polling iterations based on FIFO worth of bytes to be
+ * sent at current baud. Add a little fluff to the wait.
+ */
+ timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
+ }
+
+ /*
+ * Use custom implementation instead of readl_poll_atomic since ktimer
+ * is not ready at the time of early console.
+ */
+ timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
+ while (timeout_us) {
+ reg = readl_relaxed(uport->membase + offset);
+ if ((bool)(reg & field) == set)
+ return true;
+ udelay(10);
+ timeout_us -= 10;
+ }
+ return false;
+}
+
+static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
+{
+ u32 m_cmd;
+
+ writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ m_cmd = UART_START_TX << M_OPCODE_SHFT;
+ writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
+}
+
+static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
+{
+ int done;
+ u32 irq_clear = M_CMD_DONE_EN;
+
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_DONE_EN, true);
+ if (!done) {
+ writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
+ SE_GENI_M_CMD_CTRL_REG);
+ irq_clear |= M_CMD_ABORT_EN;
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ }
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+{
+ u32 irq_clear = S_CMD_DONE_EN | S_CMD_ABORT_EN;
+
+ writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_ABORT, false);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int qcom_geni_serial_get_char(struct uart_port *uport)
+{
+ u32 rx_fifo;
+ u32 status;
+
+ status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ /*
+ * Ensure the writes to clear interrupts is not re-ordered after
+ * reading the data.
+ */
+ mb();
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ if (!(status & RX_FIFO_WC_MSK))
+ return NO_POLL_CHAR;
+
+ rx_fifo = readl(uport->membase + SE_GENI_RX_FIFOn);
+ return rx_fifo & 0xff;
+}
+
+static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ unsigned char c)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, 1);
+ WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true));
+ writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_poll_tx_done(uport);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
+{
+ writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
+}
+
+static void
+__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ unsigned int count)
+{
+ int i;
+ u32 bytes_to_send = count;
+
+ for (i = 0; i < count; i++) {
+ /*
+ * uart_console_write() adds a carriage return for each newline.
+ * Account for additional bytes to be written.
+ */
+ if (s[i] == '\n')
+ bytes_to_send++;
+ }
+
+ writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, bytes_to_send);
+ for (i = 0; i < count; ) {
+ size_t chars_to_write = 0;
+ size_t avail = DEF_FIFO_DEPTH_WORDS - DEF_TX_WM;
+
+ /*
+ * If the WM bit never set, then the Tx state machine is not
+ * in a valid state, so break, cancel/abort any existing
+ * command. Unfortunately the current data being written is
+ * lost.
+ */
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true))
+ break;
+ chars_to_write = min_t(size_t, count - i, avail / 2);
+ uart_console_write(uport, s + i, chars_to_write,
+ qcom_geni_serial_wr_char);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ i += chars_to_write;
+ }
+ qcom_geni_serial_poll_tx_done(uport);
+}
+
+static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+ bool locked = true;
+ unsigned long flags;
+
+ WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port))
+ return;
+
+ uport = &port->uport;
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&uport->lock, flags);
+ else
+ spin_lock_irqsave(&uport->lock, flags);
+
+ /* Cancel the current write to log the fault */
+ if (!locked) {
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+
+ __qcom_geni_serial_console_write(uport, s, count);
+ if (locked)
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ u32 i;
+ unsigned char buf[sizeof(u32)];
+ struct tty_port *tport;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ tport = &uport->state->port;
+ for (i = 0; i < bytes; ) {
+ int c;
+ int chunk = min_t(int, bytes - i, port->rx_bytes_pw);
+
+ ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
+ i += chunk;
+ if (drop)
+ continue;
+
+ for (c = 0; c < chunk; c++) {
+ int sysrq;
+
+ uport->icount.rx++;
+ if (port->brk && buf[c] == 0) {
+ port->brk = false;
+ if (uart_handle_break(uport))
+ continue;
+ }
+
+ sysrq = uart_handle_sysrq_char(uport, buf[c]);
+ if (!sysrq)
+ tty_insert_flip_char(tport, buf[c], TTY_NORMAL);
+ }
+ }
+ if (!drop)
+ tty_flip_buffer_push(tport);
+ return 0;
+}
+#else
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ return -EPERM;
+}
+
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_start_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 status;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ /*
+ * readl ensures reading & writing of IRQ_EN register
+ * is not re-ordered before checking the status of the
+ * Serial Engine.
+ */
+ status = readl(uport->membase + SE_GENI_STATUS);
+ if (status & M_GENI_CMD_ACTIVE)
+ return;
+
+ if (!qcom_geni_serial_tx_empty(uport))
+ return;
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
+
+ writel_relaxed(port->tx_wm, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~M_CMD_DONE_EN;
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en &= ~M_TX_FIFO_WATERMARK_EN;
+ writel_relaxed(0, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ }
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ /* Possible stop tx is called multiple times. */
+ if (!(status & M_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Primary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_start_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_stop_rx(uport);
+
+ /*
+ * Ensure setup command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 irq_clear = S_CMD_DONE_EN;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ /* Possible stop rx is called multiple times. */
+ if (!(status & S_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_s_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_CANCEL, false);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_abort_rx(uport);
+}
+
+static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
+{
+ u32 status;
+ u32 word_cnt;
+ u32 last_word_byte_cnt;
+ u32 last_word_partial;
+ u32 total_bytes;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ last_word_partial = status & RX_LAST;
+ last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
+
+ if (!word_cnt)
+ return;
+ total_bytes = port->rx_bytes_pw * (word_cnt - 1);
+ if (last_word_partial && last_word_byte_cnt)
+ total_bytes += last_word_byte_cnt;
+ else
+ total_bytes += port->rx_bytes_pw;
+ port->handle_rx(uport, total_bytes, drop);
+}
+
+static void qcom_geni_serial_handle_tx(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ size_t avail;
+ size_t remaining;
+ int i;
+ u32 status;
+ unsigned int chunk;
+ int tail;
+
+ chunk = uart_circ_chars_pending(xmit);
+ status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
+ /* Both FIFO and framework buffer are drained */
+ if (!chunk && !status) {
+ qcom_geni_serial_stop_tx(uport);
+ goto out_write_wakeup;
+ }
+
+ avail = (port->tx_fifo_depth - port->tx_wm) * port->tx_bytes_pw;
+ tail = xmit->tail;
+ chunk = min3((size_t)chunk, (size_t)(UART_XMIT_SIZE - tail), avail);
+ if (!chunk)
+ goto out_write_wakeup;
+
+ qcom_geni_serial_setup_tx(uport, chunk);
+
+ remaining = chunk;
+ for (i = 0; i < chunk; ) {
+ unsigned int tx_bytes;
+ u8 buf[sizeof(u32)];
+ int c;
+
+ tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
+ for (c = 0; c < tx_bytes ; c++)
+ buf[c] = xmit->buf[tail + c];
+
+ iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
+
+ i += tx_bytes;
+ tail += tx_bytes;
+ uport->icount.tx += tx_bytes;
+ remaining -= tx_bytes;
+ }
+
+ xmit->tail = tail & (UART_XMIT_SIZE - 1);
+ qcom_geni_serial_poll_tx_done(uport);
+out_write_wakeup:
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+}
+
+static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
+{
+ unsigned int m_irq_status;
+ unsigned int s_irq_status;
+ struct uart_port *uport = dev;
+ unsigned long flags;
+ unsigned int m_irq_en;
+ bool drop_rx = false;
+ struct tty_port *tport = &uport->state->port;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (uport->suspended)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
+ goto out_unlock;
+
+ if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
+ uport->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ }
+
+ if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
+ m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ qcom_geni_serial_handle_tx(uport);
+
+ if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) {
+ if (s_irq_status & S_GP_IRQ_0_EN)
+ uport->icount.parity++;
+ drop_rx = true;
+ } else if (s_irq_status & S_GP_IRQ_2_EN ||
+ s_irq_status & S_GP_IRQ_3_EN) {
+ uport->icount.brk++;
+ port->brk = true;
+ }
+
+ if (s_irq_status & S_RX_FIFO_WATERMARK_EN ||
+ s_irq_status & S_RX_FIFO_LAST_EN)
+ qcom_geni_serial_handle_rx(uport, drop_rx);
+
+out_unlock:
+ spin_unlock_irqrestore(&uport->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+{
+ struct uart_port *uport;
+
+ uport = &port->uport;
+ port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
+ port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se);
+ port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
+ uport->fifosize =
+ (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+}
+
+static void set_rfr_wm(struct qcom_geni_serial_port *port)
+{
+ /*
+ * Set RFR (Flow off) to FIFO_DEPTH - 2.
+ * RX WM level at 10% RX_FIFO_DEPTH.
+ * TX WM level at 10% TX_FIFO_DEPTH.
+ */
+ port->rx_rfr = port->rx_fifo_depth - 2;
+ port->rx_wm = UART_CONSOLE_RX_WM;
+ port->tx_wm = DEF_TX_WM;
+}
+
+static void qcom_geni_serial_shutdown(struct uart_port *uport)
+{
+ unsigned long flags;
+
+ /* Stop the console before stopping the current tx */
+ console_stop(uport->cons);
+
+ free_irq(uport->irq, uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ qcom_geni_serial_stop_tx(uport);
+ qcom_geni_serial_stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int qcom_geni_serial_port_setup(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+
+ set_rfr_wm(port);
+ writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+ * it else we could end up in data loss scenarios.
+ */
+ port->xfer_mode = GENI_SE_FIFO;
+ qcom_geni_serial_poll_tx_done(uport);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
+ false, true, false);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
+ false, false, true);
+ geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
+ geni_se_select_mode(&port->se, port->xfer_mode);
+ port->setup = true;
+ return 0;
+}
+
+static int qcom_geni_serial_startup(struct uart_port *uport)
+{
+ int ret;
+ u32 proto;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ scnprintf(port->name, sizeof(port->name),
+ "qcom_serial_geni%d", uport->line);
+
+ proto = geni_se_read_proto(&port->se);
+ if (proto != GENI_SE_UART) {
+ dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
+ return -ENXIO;
+ }
+
+ get_tx_fifo_size(port);
+ if (!port->setup) {
+ ret = qcom_geni_serial_port_setup(uport);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
+ port->name, uport);
+ if (ret)
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+}
+
+static unsigned long get_clk_cfg(unsigned long clk_freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
+ if (!(root_freq[i] % clk_freq))
+ return root_freq[i];
+ }
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(unsigned int baud, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * UART_OVERSAMPLING;
+ ser_clk = get_clk_cfg(desired_clk);
+ if (!ser_clk) {
+ pr_err("%s: Can't find matching DFS entry for baud %d\n",
+ __func__, baud);
+ return ser_clk;
+ }
+
+ *clk_div = ser_clk / desired_clk;
+ return ser_clk;
+}
+
+static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int bits_per_char;
+ unsigned int tx_trans_cfg;
+ unsigned int tx_parity_cfg;
+ unsigned int rx_trans_cfg;
+ unsigned int rx_parity_cfg;
+ unsigned int stop_bit_len;
+ unsigned int clk_div;
+ unsigned long ser_clk_cfg;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned long clk_rate;
+
+ qcom_geni_serial_stop_rx(uport);
+ /* baud rate */
+ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+ port->baud = baud;
+ clk_rate = get_clk_div_rate(baud, &clk_div);
+ if (!clk_rate)
+ goto out_restart_rx;
+
+ uport->uartclk = clk_rate;
+ clk_set_rate(port->se.clk, clk_rate);
+ ser_clk_cfg = SER_CLK_EN;
+ ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+
+ /* parity */
+ tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
+ tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
+ rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
+ rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
+ if (termios->c_cflag & PARENB) {
+ tx_trans_cfg |= UART_TX_PAR_EN;
+ rx_trans_cfg |= UART_RX_PAR_EN;
+ tx_parity_cfg |= PAR_CALC_EN;
+ rx_parity_cfg |= PAR_CALC_EN;
+ if (termios->c_cflag & PARODD) {
+ tx_parity_cfg |= PAR_ODD;
+ rx_parity_cfg |= PAR_ODD;
+ } else if (termios->c_cflag & CMSPAR) {
+ tx_parity_cfg |= PAR_SPACE;
+ rx_parity_cfg |= PAR_SPACE;
+ } else {
+ tx_parity_cfg |= PAR_EVEN;
+ rx_parity_cfg |= PAR_EVEN;
+ }
+ } else {
+ tx_trans_cfg &= ~UART_TX_PAR_EN;
+ rx_trans_cfg &= ~UART_RX_PAR_EN;
+ tx_parity_cfg &= ~PAR_CALC_EN;
+ rx_parity_cfg &= ~PAR_CALC_EN;
+ }
+
+ /* bits per char */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits_per_char = 5;
+ break;
+ case CS6:
+ bits_per_char = 6;
+ break;
+ case CS7:
+ bits_per_char = 7;
+ break;
+ case CS8:
+ default:
+ bits_per_char = 8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ stop_bit_len = TX_STOP_BIT_LEN_2;
+ else
+ stop_bit_len = TX_STOP_BIT_LEN_1;
+
+ /* flow control, clear the CTS_MASK bit if using flow control. */
+ if (termios->c_cflag & CRTSCTS)
+ tx_trans_cfg &= ~UART_CTS_MASK;
+ else
+ tx_trans_cfg |= UART_CTS_MASK;
+
+ if (baud)
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+out_restart_rx:
+ qcom_geni_serial_start_rx(uport);
+}
+
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
+{
+ return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
+}
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static int __init qcom_geni_console_setup(struct console *co, char *options)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+ int baud;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= GENI_UART_CONS_PORTS || co->index < 0)
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port)) {
+ pr_err("Invalid line %d\n", co->index);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+
+ if (unlikely(!uport->membase))
+ return -ENXIO;
+
+ if (geni_se_resources_on(&port->se)) {
+ dev_err(port->se.dev, "Error turning on resources\n");
+ return -ENXIO;
+ }
+
+ if (unlikely(geni_se_read_proto(&port->se) != GENI_SE_UART)) {
+ geni_se_resources_off(&port->se);
+ return -ENXIO;
+ }
+
+ if (!port->setup) {
+ port->tx_bytes_pw = 1;
+ port->rx_bytes_pw = RX_BYTES_PW;
+ qcom_geni_serial_stop_rx(uport);
+ qcom_geni_serial_port_setup(uport);
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static void qcom_geni_serial_earlycon_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ __qcom_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ const char *opt)
+{
+ struct uart_port *uport = &dev->port;
+ u32 tx_trans_cfg;
+ u32 tx_parity_cfg = 0; /* Disable Tx Parity */
+ u32 rx_trans_cfg = 0;
+ u32 rx_parity_cfg = 0; /* Disable Rx Parity */
+ u32 stop_bit_len = 0; /* Default stop bit length - 1 bit */
+ u32 bits_per_char;
+ struct geni_se se;
+
+ if (!uport->membase)
+ return -EINVAL;
+
+ memset(&se, 0, sizeof(se));
+ se.base = uport->membase;
+ if (geni_se_read_proto(&se) != GENI_SE_UART)
+ return -ENXIO;
+ /*
+ * Ignore Flow control.
+ * n = 8.
+ */
+ tx_trans_cfg = UART_CTS_MASK;
+ bits_per_char = BITS_PER_BYTE;
+
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+ * it else we could end up in data loss scenarios.
+ */
+ qcom_geni_serial_poll_tx_done(uport);
+ qcom_geni_serial_abort_rx(uport);
+ geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
+ geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
+ geni_se_select_mode(&se, GENI_SE_FIFO);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+
+ dev->con->write = qcom_geni_serial_earlycon_write;
+ dev->con->setup = NULL;
+ return 0;
+}
+OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart",
+ qcom_geni_serial_earlycon_setup);
+
+static int __init console_register(struct uart_driver *drv)
+{
+ return uart_register_driver(drv);
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+ uart_unregister_driver(drv);
+}
+
+static struct console cons_ops = {
+ .name = "ttyMSM",
+ .write = qcom_geni_serial_console_write,
+ .device = uart_console_device,
+ .setup = qcom_geni_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &qcom_geni_console_driver,
+};
+
+static struct uart_driver qcom_geni_console_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "qcom_geni_console",
+ .dev_name = "ttyMSM",
+ .nr = GENI_UART_CONS_PORTS,
+ .cons = &cons_ops,
+};
+#else
+static int console_register(struct uart_driver *drv)
+{
+ return 0;
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+}
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_cons_pm(struct uart_port *uport,
+ unsigned int new_state, unsigned int old_state)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (unlikely(!uart_console(uport)))
+ return;
+
+ if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+ geni_se_resources_on(&port->se);
+ else if (new_state == UART_PM_STATE_OFF &&
+ old_state == UART_PM_STATE_ON)
+ geni_se_resources_off(&port->se);
+}
+
+static const struct uart_ops qcom_geni_console_pops = {
+ .tx_empty = qcom_geni_serial_tx_empty,
+ .stop_tx = qcom_geni_serial_stop_tx,
+ .start_tx = qcom_geni_serial_start_tx,
+ .stop_rx = qcom_geni_serial_stop_rx,
+ .set_termios = qcom_geni_serial_set_termios,
+ .startup = qcom_geni_serial_startup,
+ .request_port = qcom_geni_serial_request_port,
+ .config_port = qcom_geni_serial_config_port,
+ .shutdown = qcom_geni_serial_shutdown,
+ .type = qcom_geni_serial_get_type,
+ .set_mctrl = qcom_geni_cons_set_mctrl,
+ .get_mctrl = qcom_geni_cons_get_mctrl,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = qcom_geni_serial_get_char,
+ .poll_put_char = qcom_geni_serial_poll_put_char,
+#endif
+ .pm = qcom_geni_serial_cons_pm,
+};
+
+static int qcom_geni_serial_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int line = -1;
+ struct qcom_geni_serial_port *port;
+ struct uart_port *uport;
+ struct resource *res;
+ int irq;
+
+ if (pdev->dev.of_node)
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return -ENXIO;
+ port = get_port_from_line(line);
+ if (IS_ERR(port)) {
+ dev_err(&pdev->dev, "Invalid line %d\n", line);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+ /* Don't allow 2 drivers to access the same port */
+ if (uport->private_data)
+ return -ENODEV;
+
+ uport->dev = &pdev->dev;
+ port->se.dev = &pdev->dev;
+ port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ port->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(port->se.clk)) {
+ ret = PTR_ERR(port->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ uport->mapbase = res->start;
+
+ port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ return irq;
+ }
+ uport->irq = irq;
+
+ uport->private_data = &qcom_geni_console_driver;
+ platform_set_drvdata(pdev, port);
+ port->handle_rx = handle_rx_console;
+ return uart_add_one_port(&qcom_geni_console_driver, uport);
+}
+
+static int qcom_geni_serial_remove(struct platform_device *pdev)
+{
+ struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+ struct uart_driver *drv = port->uport.private_data;
+
+ uart_remove_one_port(drv, &port->uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_suspend_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ uart_suspend_port(uport->private_data, uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_resume_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ if (console_suspend_enabled && uport->suspended) {
+ uart_resume_port(uport->private_data, uport);
+ /*
+ * uart_suspend_port() invokes port shutdown which in turn
+ * frees the irq. uart_resume_port invokes port startup which
+ * performs request_irq. The request_irq auto-enables the IRQ.
+ * In addition, resume_noirq implicitly enables the IRQ and
+ * leads to an unbalanced IRQ enable warning. Disable the IRQ
+ * before returning so that the warning is suppressed.
+ */
+ disable_irq(uport->irq);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend_noirq,
+ qcom_geni_serial_sys_resume_noirq)
+};
+
+static const struct of_device_id qcom_geni_serial_match_table[] = {
+ { .compatible = "qcom,geni-debug-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
+
+static struct platform_driver qcom_geni_serial_platform_driver = {
+ .remove = qcom_geni_serial_remove,
+ .probe = qcom_geni_serial_probe,
+ .driver = {
+ .name = "qcom_geni_serial",
+ .of_match_table = qcom_geni_serial_match_table,
+ .pm = &qcom_geni_serial_pm_ops,
+ },
+};
+
+static int __init qcom_geni_serial_init(void)
+{
+ int ret;
+
+ ret = console_register(&qcom_geni_console_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&qcom_geni_serial_platform_driver);
+ if (ret)
+ console_unregister(&qcom_geni_console_driver);
+ return ret;
+}
+module_init(qcom_geni_serial_init);
+
+static void __exit qcom_geni_serial_exit(void)
+{
+ platform_driver_unregister(&qcom_geni_serial_platform_driver);
+ console_unregister(&qcom_geni_console_driver);
+}
+module_exit(qcom_geni_serial_exit);
+
+MODULE_DESCRIPTION("Serial driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/preimage b/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/preimage
new file mode 100644
index 0000000..b5b5090
--- /dev/null
+++ b/rr-cache/5784c3b9d4a4d66742fc4285016624c722e91bc7/preimage
@@ -0,0 +1,1228 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/* UART specific GENI registers */
+#define SE_UART_TX_TRANS_CFG 0x25c
+#define SE_UART_TX_WORD_LEN 0x268
+#define SE_UART_TX_STOP_BIT_LEN 0x26c
+#define SE_UART_TX_TRANS_LEN 0x270
+#define SE_UART_RX_TRANS_CFG 0x280
+#define SE_UART_RX_WORD_LEN 0x28c
+#define SE_UART_RX_STALE_CNT 0x294
+#define SE_UART_TX_PARITY_CFG 0x2a4
+#define SE_UART_RX_PARITY_CFG 0x2a8
+
+/* SE_UART_TRANS_CFG */
+#define UART_TX_PAR_EN BIT(0)
+#define UART_CTS_MASK BIT(1)
+
+/* SE_UART_TX_WORD_LEN */
+#define TX_WORD_LEN_MSK GENMASK(9, 0)
+
+/* SE_UART_TX_STOP_BIT_LEN */
+#define TX_STOP_BIT_LEN_MSK GENMASK(23, 0)
+#define TX_STOP_BIT_LEN_1 0
+#define TX_STOP_BIT_LEN_1_5 1
+#define TX_STOP_BIT_LEN_2 2
+
+/* SE_UART_TX_TRANS_LEN */
+#define TX_TRANS_LEN_MSK GENMASK(23, 0)
+
+/* SE_UART_RX_TRANS_CFG */
+#define UART_RX_INS_STATUS_BIT BIT(2)
+#define UART_RX_PAR_EN BIT(3)
+
+/* SE_UART_RX_WORD_LEN */
+#define RX_WORD_LEN_MASK GENMASK(9, 0)
+
+/* SE_UART_RX_STALE_CNT */
+#define RX_STALE_CNT GENMASK(23, 0)
+
+/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */
+#define PAR_CALC_EN BIT(0)
+#define PAR_MODE_MSK GENMASK(2, 1)
+#define PAR_MODE_SHFT 1
+#define PAR_EVEN 0x00
+#define PAR_ODD 0x01
+#define PAR_SPACE 0x10
+#define PAR_MARK 0x11
+
+/* UART M_CMD OP codes */
+#define UART_START_TX 0x1
+#define UART_START_BREAK 0x4
+#define UART_STOP_BREAK 0x5
+/* UART S_CMD OP codes */
+#define UART_START_READ 0x1
+#define UART_PARAM 0x1
+
+#define UART_OVERSAMPLING 32
+#define STALE_TIMEOUT 16
+#define DEFAULT_BITS_PER_CHAR 10
+#define GENI_UART_CONS_PORTS 1
+#define DEF_FIFO_DEPTH_WORDS 16
+#define DEF_TX_WM 2
+#define DEF_FIFO_WIDTH_BITS 32
+#define UART_CONSOLE_RX_WM 2
+
+#ifdef CONFIG_CONSOLE_POLL
+#define RX_BYTES_PW 1
+#else
+#define RX_BYTES_PW 4
+#endif
+
+struct qcom_geni_serial_port {
+ struct uart_port uport;
+ struct geni_se se;
+ char name[20];
+ u32 tx_fifo_depth;
+ u32 tx_fifo_width;
+ u32 rx_fifo_depth;
+ u32 tx_wm;
+ u32 rx_wm;
+ u32 rx_rfr;
+ enum geni_se_xfer_mode xfer_mode;
+ bool setup;
+ int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ unsigned int baud;
+ unsigned int tx_bytes_pw;
+ unsigned int rx_bytes_pw;
+ bool brk;
+};
+
+static const struct uart_ops qcom_geni_console_pops;
+static struct uart_driver qcom_geni_console_driver;
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+static void qcom_geni_serial_stop_rx(struct uart_port *uport);
+
+static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000,
+ 96000000, 100000000};
+
+#define to_dev_port(ptr, member) \
+ container_of(ptr, struct qcom_geni_serial_port, member)
+
+static struct qcom_geni_serial_port qcom_geni_console_port = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_console_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+};
+
+static int qcom_geni_serial_request_port(struct uart_port *uport)
+{
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ uport->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(uport->membase))
+ return PTR_ERR(uport->membase);
+ port->se.base = uport->membase;
+ return 0;
+}
+
+static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
+{
+ if (cfg_flags & UART_CONFIG_TYPE) {
+ uport->type = PORT_MSM;
+ qcom_geni_serial_request_port(uport);
+ }
+}
+
+static unsigned int qcom_geni_cons_get_mctrl(struct uart_port *uport)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+static void qcom_geni_cons_set_mctrl(struct uart_port *uport,
+ unsigned int mctrl)
+{
+}
+
+static const char *qcom_geni_serial_get_type(struct uart_port *uport)
+{
+ return "MSM";
+}
+
+static struct qcom_geni_serial_port *get_port_from_line(int line)
+{
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return ERR_PTR(-ENXIO);
+ return &qcom_geni_console_port;
+}
+
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ int offset, int field, bool set)
+{
+ u32 reg;
+ struct qcom_geni_serial_port *port;
+ unsigned int baud;
+ unsigned int fifo_bits;
+ unsigned long timeout_us = 20000;
+
+ /* Ensure polling is not re-ordered before the prior writes/reads */
+ mb();
+
+ if (uport->private_data) {
+ port = to_dev_port(uport, uport);
+ baud = port->baud;
+ if (!baud)
+ baud = 115200;
+ fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+ /*
+ * Total polling iterations based on FIFO worth of bytes to be
+ * sent at current baud. Add a little fluff to the wait.
+ */
+ timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
+ }
+
+ /*
+ * Use custom implementation instead of readl_poll_atomic since ktimer
+ * is not ready at the time of early console.
+ */
+ timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
+ while (timeout_us) {
+ reg = readl_relaxed(uport->membase + offset);
+ if ((bool)(reg & field) == set)
+ return true;
+ udelay(10);
+ timeout_us -= 10;
+ }
+ return false;
+}
+
+static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
+{
+ u32 m_cmd;
+
+ writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ m_cmd = UART_START_TX << M_OPCODE_SHFT;
+ writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
+}
+
+static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
+{
+ int done;
+ u32 irq_clear = M_CMD_DONE_EN;
+
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_DONE_EN, true);
+ if (!done) {
+ writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
+ SE_GENI_M_CMD_CTRL_REG);
+ irq_clear |= M_CMD_ABORT_EN;
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ }
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+{
+ u32 irq_clear = S_CMD_DONE_EN | S_CMD_ABORT_EN;
+
+ writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_ABORT, false);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int qcom_geni_serial_get_char(struct uart_port *uport)
+{
+ u32 rx_fifo;
+ u32 status;
+
+ status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ /*
+ * Ensure the writes to clear interrupts is not re-ordered after
+ * reading the data.
+ */
+ mb();
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ if (!(status & RX_FIFO_WC_MSK))
+ return NO_POLL_CHAR;
+
+ rx_fifo = readl(uport->membase + SE_GENI_RX_FIFOn);
+ return rx_fifo & 0xff;
+}
+
+static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ unsigned char c)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, 1);
+ WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true));
+ writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_poll_tx_done(uport);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
+{
+ writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
+}
+
+static void
+__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ unsigned int count)
+{
+ int i;
+ u32 bytes_to_send = count;
+
+ for (i = 0; i < count; i++) {
+ /*
+ * uart_console_write() adds a carriage return for each newline.
+ * Account for additional bytes to be written.
+ */
+ if (s[i] == '\n')
+ bytes_to_send++;
+ }
+
+ writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, bytes_to_send);
+ for (i = 0; i < count; ) {
+ size_t chars_to_write = 0;
+ size_t avail = DEF_FIFO_DEPTH_WORDS - DEF_TX_WM;
+
+ /*
+ * If the WM bit never set, then the Tx state machine is not
+ * in a valid state, so break, cancel/abort any existing
+ * command. Unfortunately the current data being written is
+ * lost.
+ */
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true))
+ break;
+ chars_to_write = min_t(size_t, count - i, avail / 2);
+ uart_console_write(uport, s + i, chars_to_write,
+ qcom_geni_serial_wr_char);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ i += chars_to_write;
+ }
+ qcom_geni_serial_poll_tx_done(uport);
+}
+
+static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+ bool locked = true;
+ unsigned long flags;
+
+ WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port))
+ return;
+
+ uport = &port->uport;
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&uport->lock, flags);
+ else
+ spin_lock_irqsave(&uport->lock, flags);
+
+ /* Cancel the current write to log the fault */
+ if (!locked) {
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+
+ __qcom_geni_serial_console_write(uport, s, count);
+ if (locked)
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ u32 i;
+ unsigned char buf[sizeof(u32)];
+ struct tty_port *tport;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ tport = &uport->state->port;
+ for (i = 0; i < bytes; ) {
+ int c;
+ int chunk = min_t(int, bytes - i, port->rx_bytes_pw);
+
+ ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
+ i += chunk;
+ if (drop)
+ continue;
+
+ for (c = 0; c < chunk; c++) {
+ int sysrq;
+
+ uport->icount.rx++;
+ if (port->brk && buf[c] == 0) {
+ port->brk = false;
+ if (uart_handle_break(uport))
+ continue;
+ }
+
+ sysrq = uart_handle_sysrq_char(uport, buf[c]);
+ if (!sysrq)
+ tty_insert_flip_char(tport, buf[c], TTY_NORMAL);
+ }
+ }
+ if (!drop)
+ tty_flip_buffer_push(tport);
+ return 0;
+}
+#else
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ return -EPERM;
+}
+
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_start_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 status;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ /*
+ * readl ensures reading & writing of IRQ_EN register
+ * is not re-ordered before checking the status of the
+ * Serial Engine.
+ */
+ status = readl(uport->membase + SE_GENI_STATUS);
+ if (status & M_GENI_CMD_ACTIVE)
+ return;
+
+ if (!qcom_geni_serial_tx_empty(uport))
+ return;
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
+
+ writel_relaxed(port->tx_wm, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~M_CMD_DONE_EN;
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en &= ~M_TX_FIFO_WATERMARK_EN;
+ writel_relaxed(0, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ }
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ /* Possible stop tx is called multiple times. */
+ if (!(status & M_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Primary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_start_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_stop_rx(uport);
+
+ /*
+ * Ensure setup command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 irq_clear = S_CMD_DONE_EN;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ /* Possible stop rx is called multiple times. */
+ if (!(status & S_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_s_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_CANCEL, false);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_abort_rx(uport);
+}
+
+static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
+{
+ u32 status;
+ u32 word_cnt;
+ u32 last_word_byte_cnt;
+ u32 last_word_partial;
+ u32 total_bytes;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ last_word_partial = status & RX_LAST;
+ last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
+
+ if (!word_cnt)
+ return;
+ total_bytes = port->rx_bytes_pw * (word_cnt - 1);
+ if (last_word_partial && last_word_byte_cnt)
+ total_bytes += last_word_byte_cnt;
+ else
+ total_bytes += port->rx_bytes_pw;
+ port->handle_rx(uport, total_bytes, drop);
+}
+
+static void qcom_geni_serial_handle_tx(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ size_t avail;
+ size_t remaining;
+ int i;
+ u32 status;
+ unsigned int chunk;
+ int tail;
+
+ chunk = uart_circ_chars_pending(xmit);
+ status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
+ /* Both FIFO and framework buffer are drained */
+ if (!chunk && !status) {
+ qcom_geni_serial_stop_tx(uport);
+ goto out_write_wakeup;
+ }
+
+ avail = (port->tx_fifo_depth - port->tx_wm) * port->tx_bytes_pw;
+ tail = xmit->tail;
+ chunk = min3((size_t)chunk, (size_t)(UART_XMIT_SIZE - tail), avail);
+ if (!chunk)
+ goto out_write_wakeup;
+
+ qcom_geni_serial_setup_tx(uport, chunk);
+
+ remaining = chunk;
+ for (i = 0; i < chunk; ) {
+ unsigned int tx_bytes;
+ u8 buf[sizeof(u32)];
+ int c;
+
+<<<<<<<
+=======
+ memset(buf, 0, ARRAY_SIZE(buf));
+>>>>>>>
+ tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
+ for (c = 0; c < tx_bytes ; c++)
+ buf[c] = xmit->buf[tail + c];
+
+ iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
+
+ i += tx_bytes;
+ tail += tx_bytes;
+ uport->icount.tx += tx_bytes;
+ remaining -= tx_bytes;
+ }
+
+ xmit->tail = tail & (UART_XMIT_SIZE - 1);
+ qcom_geni_serial_poll_tx_done(uport);
+out_write_wakeup:
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+}
+
+static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
+{
+ unsigned int m_irq_status;
+ unsigned int s_irq_status;
+ struct uart_port *uport = dev;
+ unsigned long flags;
+ unsigned int m_irq_en;
+ bool drop_rx = false;
+ struct tty_port *tport = &uport->state->port;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (uport->suspended)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
+ goto out_unlock;
+
+ if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
+ uport->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ }
+
+ if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
+ m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ qcom_geni_serial_handle_tx(uport);
+
+ if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) {
+ if (s_irq_status & S_GP_IRQ_0_EN)
+ uport->icount.parity++;
+ drop_rx = true;
+ } else if (s_irq_status & S_GP_IRQ_2_EN ||
+ s_irq_status & S_GP_IRQ_3_EN) {
+ uport->icount.brk++;
+ port->brk = true;
+ }
+
+ if (s_irq_status & S_RX_FIFO_WATERMARK_EN ||
+ s_irq_status & S_RX_FIFO_LAST_EN)
+ qcom_geni_serial_handle_rx(uport, drop_rx);
+
+out_unlock:
+ spin_unlock_irqrestore(&uport->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+{
+ struct uart_port *uport;
+
+ uport = &port->uport;
+ port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
+ port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se);
+ port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
+ uport->fifosize =
+ (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+}
+
+static void set_rfr_wm(struct qcom_geni_serial_port *port)
+{
+ /*
+ * Set RFR (Flow off) to FIFO_DEPTH - 2.
+ * RX WM level at 10% RX_FIFO_DEPTH.
+ * TX WM level at 10% TX_FIFO_DEPTH.
+ */
+ port->rx_rfr = port->rx_fifo_depth - 2;
+ port->rx_wm = UART_CONSOLE_RX_WM;
+ port->tx_wm = DEF_TX_WM;
+}
+
+static void qcom_geni_serial_shutdown(struct uart_port *uport)
+{
+ unsigned long flags;
+
+ /* Stop the console before stopping the current tx */
+ console_stop(uport->cons);
+
+ free_irq(uport->irq, uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ qcom_geni_serial_stop_tx(uport);
+ qcom_geni_serial_stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int qcom_geni_serial_port_setup(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+
+ set_rfr_wm(port);
+ writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ /*
+	 * Make an unconditional cancel on the main sequencer to reset
+	 * it; otherwise we could end up in data-loss scenarios.
+ */
+ port->xfer_mode = GENI_SE_FIFO;
+ qcom_geni_serial_poll_tx_done(uport);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
+ false, true, false);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
+ false, false, true);
+ geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
+ geni_se_select_mode(&port->se, port->xfer_mode);
+ port->setup = true;
+ return 0;
+}
+
+static int qcom_geni_serial_startup(struct uart_port *uport)
+{
+ int ret;
+ u32 proto;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ scnprintf(port->name, sizeof(port->name),
+ "qcom_serial_geni%d", uport->line);
+
+ proto = geni_se_read_proto(&port->se);
+ if (proto != GENI_SE_UART) {
+ dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
+ return -ENXIO;
+ }
+
+ get_tx_fifo_size(port);
+ if (!port->setup) {
+ ret = qcom_geni_serial_port_setup(uport);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
+ port->name, uport);
+ if (ret)
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+}
+
+static unsigned long get_clk_cfg(unsigned long clk_freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
+ if (!(root_freq[i] % clk_freq))
+ return root_freq[i];
+ }
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(unsigned int baud, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * UART_OVERSAMPLING;
+ ser_clk = get_clk_cfg(desired_clk);
+ if (!ser_clk) {
+ pr_err("%s: Can't find matching DFS entry for baud %d\n",
+ __func__, baud);
+ return ser_clk;
+ }
+
+ *clk_div = ser_clk / desired_clk;
+ return ser_clk;
+}
+
+static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int bits_per_char;
+ unsigned int tx_trans_cfg;
+ unsigned int tx_parity_cfg;
+ unsigned int rx_trans_cfg;
+ unsigned int rx_parity_cfg;
+ unsigned int stop_bit_len;
+ unsigned int clk_div;
+ unsigned long ser_clk_cfg;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned long clk_rate;
+
+ qcom_geni_serial_stop_rx(uport);
+ /* baud rate */
+ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+ port->baud = baud;
+ clk_rate = get_clk_div_rate(baud, &clk_div);
+ if (!clk_rate)
+ goto out_restart_rx;
+
+ uport->uartclk = clk_rate;
+ clk_set_rate(port->se.clk, clk_rate);
+ ser_clk_cfg = SER_CLK_EN;
+ ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+
+ /* parity */
+ tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
+ tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
+ rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
+ rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
+ if (termios->c_cflag & PARENB) {
+ tx_trans_cfg |= UART_TX_PAR_EN;
+ rx_trans_cfg |= UART_RX_PAR_EN;
+ tx_parity_cfg |= PAR_CALC_EN;
+ rx_parity_cfg |= PAR_CALC_EN;
+ if (termios->c_cflag & PARODD) {
+ tx_parity_cfg |= PAR_ODD;
+ rx_parity_cfg |= PAR_ODD;
+ } else if (termios->c_cflag & CMSPAR) {
+ tx_parity_cfg |= PAR_SPACE;
+ rx_parity_cfg |= PAR_SPACE;
+ } else {
+ tx_parity_cfg |= PAR_EVEN;
+ rx_parity_cfg |= PAR_EVEN;
+ }
+ } else {
+ tx_trans_cfg &= ~UART_TX_PAR_EN;
+ rx_trans_cfg &= ~UART_RX_PAR_EN;
+ tx_parity_cfg &= ~PAR_CALC_EN;
+ rx_parity_cfg &= ~PAR_CALC_EN;
+ }
+
+ /* bits per char */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits_per_char = 5;
+ break;
+ case CS6:
+ bits_per_char = 6;
+ break;
+ case CS7:
+ bits_per_char = 7;
+ break;
+ case CS8:
+ default:
+ bits_per_char = 8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ stop_bit_len = TX_STOP_BIT_LEN_2;
+ else
+ stop_bit_len = TX_STOP_BIT_LEN_1;
+
+ /* flow control, clear the CTS_MASK bit if using flow control. */
+ if (termios->c_cflag & CRTSCTS)
+ tx_trans_cfg &= ~UART_CTS_MASK;
+ else
+ tx_trans_cfg |= UART_CTS_MASK;
+
+ if (baud)
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+out_restart_rx:
+ qcom_geni_serial_start_rx(uport);
+}
+
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
+{
+ return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
+}
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static int __init qcom_geni_console_setup(struct console *co, char *options)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+ int baud;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= GENI_UART_CONS_PORTS || co->index < 0)
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port)) {
+ pr_err("Invalid line %d\n", co->index);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+
+ if (unlikely(!uport->membase))
+ return -ENXIO;
+
+ if (geni_se_resources_on(&port->se)) {
+ dev_err(port->se.dev, "Error turning on resources\n");
+ return -ENXIO;
+ }
+
+ if (unlikely(geni_se_read_proto(&port->se) != GENI_SE_UART)) {
+ geni_se_resources_off(&port->se);
+ return -ENXIO;
+ }
+
+ if (!port->setup) {
+ port->tx_bytes_pw = 1;
+ port->rx_bytes_pw = RX_BYTES_PW;
+ qcom_geni_serial_stop_rx(uport);
+ qcom_geni_serial_port_setup(uport);
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static void qcom_geni_serial_earlycon_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ __qcom_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ const char *opt)
+{
+ struct uart_port *uport = &dev->port;
+ u32 tx_trans_cfg;
+ u32 tx_parity_cfg = 0; /* Disable Tx Parity */
+ u32 rx_trans_cfg = 0;
+ u32 rx_parity_cfg = 0; /* Disable Rx Parity */
+ u32 stop_bit_len = 0; /* Default stop bit length - 1 bit */
+ u32 bits_per_char;
+ struct geni_se se;
+
+ if (!uport->membase)
+ return -EINVAL;
+
+ memset(&se, 0, sizeof(se));
+ se.base = uport->membase;
+ if (geni_se_read_proto(&se) != GENI_SE_UART)
+ return -ENXIO;
+ /*
+	 * Ignore flow control.
+	 * Default to 8 bits per character (n = 8).
+ */
+ tx_trans_cfg = UART_CTS_MASK;
+ bits_per_char = BITS_PER_BYTE;
+
+ /*
+	 * Make an unconditional cancel on the main sequencer to reset
+	 * it; otherwise we could end up in data-loss scenarios.
+ */
+ qcom_geni_serial_poll_tx_done(uport);
+ qcom_geni_serial_abort_rx(uport);
+ geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
+ geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
+ geni_se_select_mode(&se, GENI_SE_FIFO);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+
+ dev->con->write = qcom_geni_serial_earlycon_write;
+ dev->con->setup = NULL;
+ return 0;
+}
+OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart",
+ qcom_geni_serial_earlycon_setup);
+
+static int __init console_register(struct uart_driver *drv)
+{
+ return uart_register_driver(drv);
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+ uart_unregister_driver(drv);
+}
+
+static struct console cons_ops = {
+ .name = "ttyMSM",
+ .write = qcom_geni_serial_console_write,
+ .device = uart_console_device,
+ .setup = qcom_geni_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &qcom_geni_console_driver,
+};
+
+static struct uart_driver qcom_geni_console_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "qcom_geni_console",
+ .dev_name = "ttyMSM",
+ .nr = GENI_UART_CONS_PORTS,
+ .cons = &cons_ops,
+};
+#else
+static int console_register(struct uart_driver *drv)
+{
+ return 0;
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+}
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_cons_pm(struct uart_port *uport,
+ unsigned int new_state, unsigned int old_state)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (unlikely(!uart_console(uport)))
+ return;
+
+ if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+ geni_se_resources_on(&port->se);
+ else if (new_state == UART_PM_STATE_OFF &&
+ old_state == UART_PM_STATE_ON)
+ geni_se_resources_off(&port->se);
+}
+
+static const struct uart_ops qcom_geni_console_pops = {
+ .tx_empty = qcom_geni_serial_tx_empty,
+ .stop_tx = qcom_geni_serial_stop_tx,
+ .start_tx = qcom_geni_serial_start_tx,
+ .stop_rx = qcom_geni_serial_stop_rx,
+ .set_termios = qcom_geni_serial_set_termios,
+ .startup = qcom_geni_serial_startup,
+ .request_port = qcom_geni_serial_request_port,
+ .config_port = qcom_geni_serial_config_port,
+ .shutdown = qcom_geni_serial_shutdown,
+ .type = qcom_geni_serial_get_type,
+ .set_mctrl = qcom_geni_cons_set_mctrl,
+ .get_mctrl = qcom_geni_cons_get_mctrl,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = qcom_geni_serial_get_char,
+ .poll_put_char = qcom_geni_serial_poll_put_char,
+#endif
+ .pm = qcom_geni_serial_cons_pm,
+};
+
+static int qcom_geni_serial_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int line = -1;
+ struct qcom_geni_serial_port *port;
+ struct uart_port *uport;
+ struct resource *res;
+ int irq;
+
+ if (pdev->dev.of_node)
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return -ENXIO;
+ port = get_port_from_line(line);
+ if (IS_ERR(port)) {
+ dev_err(&pdev->dev, "Invalid line %d\n", line);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+ /* Don't allow 2 drivers to access the same port */
+ if (uport->private_data)
+ return -ENODEV;
+
+ uport->dev = &pdev->dev;
+ port->se.dev = &pdev->dev;
+ port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ port->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(port->se.clk)) {
+ ret = PTR_ERR(port->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ uport->mapbase = res->start;
+
+ port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ return irq;
+ }
+ uport->irq = irq;
+
+ uport->private_data = &qcom_geni_console_driver;
+ platform_set_drvdata(pdev, port);
+ port->handle_rx = handle_rx_console;
+ return uart_add_one_port(&qcom_geni_console_driver, uport);
+}
+
+static int qcom_geni_serial_remove(struct platform_device *pdev)
+{
+ struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+ struct uart_driver *drv = port->uport.private_data;
+
+ uart_remove_one_port(drv, &port->uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_suspend_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ uart_suspend_port(uport->private_data, uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_resume_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ if (console_suspend_enabled && uport->suspended) {
+ uart_resume_port(uport->private_data, uport);
+ /*
+ * uart_suspend_port() invokes port shutdown which in turn
+ * frees the irq. uart_resume_port invokes port startup which
+ * performs request_irq. The request_irq auto-enables the IRQ.
+ * In addition, resume_noirq implicitly enables the IRQ and
+ * leads to an unbalanced IRQ enable warning. Disable the IRQ
+ * before returning so that the warning is suppressed.
+ */
+ disable_irq(uport->irq);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend_noirq,
+ qcom_geni_serial_sys_resume_noirq)
+};
+
+static const struct of_device_id qcom_geni_serial_match_table[] = {
+ { .compatible = "qcom,geni-debug-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
+
+static struct platform_driver qcom_geni_serial_platform_driver = {
+ .remove = qcom_geni_serial_remove,
+ .probe = qcom_geni_serial_probe,
+ .driver = {
+ .name = "qcom_geni_serial",
+ .of_match_table = qcom_geni_serial_match_table,
+ .pm = &qcom_geni_serial_pm_ops,
+ },
+};
+
+static int __init qcom_geni_serial_init(void)
+{
+ int ret;
+
+ ret = console_register(&qcom_geni_console_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&qcom_geni_serial_platform_driver);
+ if (ret)
+ console_unregister(&qcom_geni_console_driver);
+ return ret;
+}
+module_init(qcom_geni_serial_init);
+
+static void __exit qcom_geni_serial_exit(void)
+{
+ platform_driver_unregister(&qcom_geni_serial_platform_driver);
+ console_unregister(&qcom_geni_console_driver);
+}
+module_exit(qcom_geni_serial_exit);
+
+MODULE_DESCRIPTION("Serial driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage b/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage
new file mode 100644
index 0000000..c3b51ca
--- /dev/null
+++ b/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 MTP board device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 MTP";
+ compatible = "qcom,sdm845-mtp";
+<<<<<<<
+=======
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&soc {
+ geniqup@ac0000 {
+ status = "okay";
+
+ serial@a84000 {
+ status = "okay";
+ };
+
+ i2c@a88000 {
+ clock-frequency = <400000>;
+ status = "okay";
+ };
+ };
+
+ pinctrl@3400000 {
+ qup-i2c10-default {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qup-i2c10-sleep {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup-uart2-default {
+ pinconf_tx {
+ pins = "gpio4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pinconf_rx {
+ pins = "gpio5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ qup-uart2-sleep {
+ pinconf {
+ pins = "gpio4", "gpio5";
+ bias-pull-down;
+ };
+ };
+ };
+
+ phy@1d87000 {
+ status = "ok";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+ };
+
+ ufshc@1d84000 {
+ status = "ok";
+
+ vcc-supply = <&pm8998_l20>;
+ vcc-voltage-level = <2950000 2960000>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ };
+};
+
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ vdd_l7_l12_l14_l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,regulator-initial-voltage = <1100000>;
+ };
+
+ smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ qcom,regulator-initial-voltage = <1904000>;
+ };
+
+ ldo1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,regulator-initial-voltage = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 30000>;
+ };
+
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,regulator-initial-voltage = <1800000>;
+ qcom,headroom-voltage = <56000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo20 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ qcom,regulator-initial-voltage = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_PASS>;
+ };
+ };
+>>>>>>>
+};
diff --git a/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage.1 b/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage.1
new file mode 100644
index 0000000..c3b51ca
--- /dev/null
+++ b/rr-cache/584605a29b683540fca16950917b48cb773a78be/preimage.1
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 MTP board device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 MTP";
+ compatible = "qcom,sdm845-mtp";
+<<<<<<<
+=======
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&soc {
+ geniqup@ac0000 {
+ status = "okay";
+
+ serial@a84000 {
+ status = "okay";
+ };
+
+ i2c@a88000 {
+ clock-frequency = <400000>;
+ status = "okay";
+ };
+ };
+
+ pinctrl@3400000 {
+ qup-i2c10-default {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qup-i2c10-sleep {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ };
+ };
+
+ qup-uart2-default {
+ pinconf_tx {
+ pins = "gpio4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pinconf_rx {
+ pins = "gpio5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ qup-uart2-sleep {
+ pinconf {
+ pins = "gpio4", "gpio5";
+ bias-pull-down;
+ };
+ };
+ };
+
+ phy@1d87000 {
+ status = "ok";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+ };
+
+ ufshc@1d84000 {
+ status = "ok";
+
+ vcc-supply = <&pm8998_l20>;
+ vcc-voltage-level = <2950000 2960000>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ };
+};
+
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ vdd_l7_l12_l14_l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,regulator-initial-voltage = <1100000>;
+ };
+
+ smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ qcom,regulator-initial-voltage = <1904000>;
+ };
+
+ ldo1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,regulator-initial-voltage = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 30000>;
+ };
+
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,regulator-initial-voltage = <1800000>;
+ qcom,headroom-voltage = <56000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo20 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 10000>;
+ };
+
+ ldo26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,regulator-initial-voltage = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ qcom,mode-threshold-currents = <0 1>;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ qcom,regulator-initial-voltage = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_PASS>;
+ };
+ };
+>>>>>>>
+};
diff --git a/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage
new file mode 100644
index 0000000..f921d1f
--- /dev/null
+++ b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+
+#define PLL_MODE(p) ((p)->offset + 0x0)
+# define PLL_OUTCTRL BIT(0)
+# define PLL_BYPASSNL BIT(1)
+# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
+# define PLL_LOCK_COUNT_SHIFT 8
+# define PLL_LOCK_COUNT_MASK 0x3f
+# define PLL_BIAS_COUNT_SHIFT 14
+# define PLL_BIAS_COUNT_MASK 0x3f
+# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
+# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_UPDATE BIT(22)
+# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_OFFLINE_ACK BIT(28)
+# define ALPHA_PLL_ACK_LATCH BIT(29)
+# define PLL_ACTIVE_FLAG BIT(30)
+# define PLL_LOCK_DET BIT(31)
+
+#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
+#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
+#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
+
+#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+# define PLL_POST_DIV_SHIFT 8
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_ALPHA_EN BIT(24)
+# define PLL_ALPHA_MODE BIT(25)
+# define PLL_VCO_SHIFT 20
+# define PLL_VCO_MASK 0x3
+
+#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
+
+#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
+#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
+#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
+#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+
+const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_USER_CTL_U] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ [PLL_OFF_OPMODE] = 0x2c,
+ [PLL_OFF_FRAC] = 0x38,
+ },
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_REG_16BIT_WIDTH 16
+#define ALPHA_BITWIDTH 32U
+#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
+
+#define PLL_HUAYRA_M_WIDTH 8
+#define PLL_HUAYRA_M_SHIFT 8
+#define PLL_HUAYRA_M_MASK 0xff
+#define PLL_HUAYRA_N_SHIFT 0
+#define PLL_HUAYRA_N_MASK 0xff
+#define PLL_HUAYRA_ALPHA_WIDTH 16
+
+#define FABIA_OPMODE_STANDBY 0x0
+#define FABIA_OPMODE_RUN 0x1
+
+#define FABIA_PLL_OUT_MASK 0x7
+#define FABIA_PLL_RATE_MARGIN 500
+
+#define pll_alpha_width(p) \
+ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
+ ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
+
+#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
+
+#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll, clkr)
+
+#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll_postdiv, clkr)
+
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ for (count = 100; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ WARN(1, "%s failed to %s!\n", name, action);
+ return -ETIMEDOUT;
+}
+
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
+#define wait_for_pll_update(pll) \
+ wait_for_pll(pll, PLL_UPDATE, 1, "update")
+
+#define wait_for_pll_update_ack_set(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
+
+#define wait_for_pll_update_ack_clear(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+
+ if (pll_has_64bit_config(pll))
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (pll_alpha_width(pll) > 32)
+ regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+ val |= config->alpha_mode_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
+}
+EXPORT_SYMBOL_GPL(clk_alpha_pll_configure);
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
+static int clk_alpha_pll_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+ /* Skip if already enabled */
+ if ((val & mask) == mask)
+ return 0;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_BYPASSNL, PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_alpha_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ mask = PLL_OUTCTRL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mask = PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+}
+
+static unsigned long
+alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
+{
+ return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
+}
+
+static unsigned long
+alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
+ u32 alpha_width)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << ALPHA_SHIFT(alpha_width);
+
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ *a = quotient;
+ return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
+}
+
+static const struct pll_vco *
+alpha_pll_find_vco(const struct clk_alpha_pll *pll, unsigned long rate)
+{
+ const struct pll_vco *v = pll->vco_table;
+ const struct pll_vco *end = v + pll->num_vco;
+
+ for (; v < end; v++)
+ if (rate >= v->min_freq && rate <= v->max_freq)
+ return v;
+
+ return NULL;
+}
+
+static unsigned long
+clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, low, high, ctl;
+ u64 a = 0, prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (alpha_width > 32) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high);
+ a = (u64)high << 32 | low;
+ } else {
+ a = low & GENMASK(alpha_width - 1, 0);
+ }
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a >>= alpha_width - ALPHA_BITWIDTH;
+ }
+
+ return alpha_pll_calc_rate(prate, l, a, alpha_width);
+}
+
+
+static int __clk_alpha_pll_update_latch(struct clk_alpha_pll *pll)
+{
+ int ret;
+ u32 mode;
+
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
+
+ /* Latch the input to the PLL */
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
+ PLL_UPDATE);
+
+	/* Wait for 2 reference cycles before checking the ACK bit */
+ udelay(1);
+
+ /*
+ * PLL will latch the new L, Alpha and freq control word.
+ * PLL will respond by raising PLL_ACK_LATCH output when new programming
+ * has been latched in and PLL is being updated. When
+ * UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
+ * automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
+ */
+ if (mode & PLL_UPDATE_BYPASS) {
+ ret = wait_for_pll_update_ack_set(pll);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
+ } else {
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ }
+
+ ret = wait_for_pll_update_ack_clear(pll);
+ if (ret)
+ return ret;
+
+ /* Wait for PLL output to stabilize */
+ udelay(10);
+
+ return 0;
+}
+
+static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
+ int (*is_enabled)(struct clk_hw *))
+{
+ if (!is_enabled(&pll->clkr.hw) ||
+ !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ return 0;
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate,
+ int (*is_enabled)(struct clk_hw *))
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ vco = alpha_pll_find_vco(pll, rate);
+ if (pll->vco_table && !vco) {
+ pr_err("alpha pll not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
+ return clk_alpha_pll_update_latch(pll, is_enabled);
+}
+
+static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_is_enabled);
+}
+
+static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_hwfsm_is_enabled);
+}
+
+static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long min_freq, max_freq;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+static unsigned long
+alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
+{
+ /*
+	 * a contains the 16-bit alpha_val as a two’s complement number in
+	 * the range [-0.5, 0.5).
+ */
+ if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ l -= 1;
+
+ return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
+}
+
+static unsigned long
+alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
+ u32 *l, u32 *a)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ /*
+	 * alpha_val should be a two’s complement number in the range
+	 * [-0.5, 0.5), so if quotient >= 0.5 then increment the l value
+	 * since the alpha value will be subtracted in this case.
+ */
+ if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ *l += 1;
+
+ *a = quotient;
+ return alpha_huayra_pll_calc_rate(prate, *l, *a);
+}
+
+static unsigned long
+alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u64 rate = parent_rate, tmp;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha = 0, ctl, alpha_m, alpha_n;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
+ /*
+		 * Depending upon alpha_mode, it can be treated as an M/N value
+		 * or as a two’s complement number. When alpha_mode=1,
+		 * pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
+ *
+ * Fout=FIN*(L+(M/N))
+ *
+ * M is a signed number (-128 to 127) and N is unsigned
+ * (0 to 255). M/N has to be within +/-0.5.
+ *
+		 * When alpha_mode=0, it is a two’s complement number in the
+ * range [-0.5, 0.5).
+ *
+ * Fout=FIN*(L+(alpha_val)/2^16)
+ *
+		 * where alpha_val is a two’s complement number.
+ */
+ if (!(ctl & PLL_ALPHA_MODE))
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+
+ alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
+ alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
+
+ rate *= l;
+ tmp = parent_rate;
+ if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
+ alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate -= tmp;
+ } else {
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate += tmp;
+ }
+
+ return rate;
+ }
+
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+}
+
+static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, a, ctl, cur_alpha = 0;
+
+ rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN)
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
+
+ /*
+ * Huayra PLL supports PLL dynamic programming. User can change L_VAL,
+	 * The Huayra PLL supports dynamic programming: the user can change
+	 * L_VAL without having to go through the power-on sequence.
+ if (clk_alpha_pll_is_enabled(hw)) {
+ if (cur_alpha != a) {
+ pr_err("clock needs to be gated %s\n",
+ clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+		/* Ensure the write above goes through so the L value change is detected. */
+ mb();
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (a == 0)
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, 0x0);
+ else
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
+
+ return 0;
+}
+
+static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 l, a;
+
+ return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+}
+
+const struct clk_ops clk_alpha_pll_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+
+const struct clk_ops clk_alpha_pll_huayra_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_huayra_recalc_rate,
+ .round_rate = alpha_pll_huayra_round_rate,
+ .set_rate = alpha_pll_huayra_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
+
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_hwfsm_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+static unsigned long
+clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK(pll);
+
+ return parent_rate >> fls(ctl);
+}
+
+static const struct clk_div_table clk_alpha_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { 0xf, 16 },
+ { }
+};
+
+static const struct clk_div_table clk_alpha_2bit_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { }
+};
+
+static long
+clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ const struct clk_div_table *table;
+
+ if (pll->width == 2)
+ table = clk_alpha_2bit_div_table;
+ else
+ table = clk_alpha_div_table;
+
+ return divider_round_rate(hw, rate, prate, table,
+ pll->width, CLK_DIVIDER_POWER_OF_TWO);
+}
+
+static long
+clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl, div;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= BIT(pll->width) - 1;
+ div = 1 << fls(ctl);
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int div;
+
+ /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+ div << PLL_POST_DIV_SHIFT);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
+ .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
+
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_FRAC(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ }
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+<<<<<<<
+=======
+EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
+>>>>>>>
+
+static int alpha_pll_fabia_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, opmode_val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+<<<<<<<
+ /* Read opmode value */
+ ret = regmap_read(pll->clkr.regmap, PLL_OPMODE(pll), &opmode_val);
+=======
+ ret = regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+	/* Skip if the PLL is already running */
+ if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ return 0;
+
+<<<<<<<
+ /* Disable PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+	/* Set Operation mode to STANDBY */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ /* PLL should be in STANDBY mode before continuing */
+ mb();
+
+ /* Bring PLL out of reset */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to RUN */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_RUN);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* Enable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL,
+ PLL_OUTCTRL);
+>>>>>>>
+}
+
+static void alpha_pll_fabia_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+<<<<<<<
+ /* Disable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Disable main outputs */
+<<<<<<<
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
+ 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Place the PLL in STANDBY */
+<<<<<<<
+ regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+=======
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return;
+>>>>>>>
+}
+
+static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+}
+
+static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long rrate;
+ int ret = 0;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+	 * Due to the limited number of bits for fractional rate programming, the
+	 * rounded-up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .set_rate = alpha_pll_fabia_set_rate,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
+
+static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, div = 1, val;
+ int ret;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ val >>= pll->post_div_shift;
+ val &= BIT(pll->width) - 1;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / div);
+}
+
+static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ (BIT(pll->width) - 1) << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
diff --git a/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.1 b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.1
new file mode 100644
index 0000000..f921d1f
--- /dev/null
+++ b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.1
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+
+#define PLL_MODE(p) ((p)->offset + 0x0)
+# define PLL_OUTCTRL BIT(0)
+# define PLL_BYPASSNL BIT(1)
+# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
+# define PLL_LOCK_COUNT_SHIFT 8
+# define PLL_LOCK_COUNT_MASK 0x3f
+# define PLL_BIAS_COUNT_SHIFT 14
+# define PLL_BIAS_COUNT_MASK 0x3f
+# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
+# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_UPDATE BIT(22)
+# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_OFFLINE_ACK BIT(28)
+# define ALPHA_PLL_ACK_LATCH BIT(29)
+# define PLL_ACTIVE_FLAG BIT(30)
+# define PLL_LOCK_DET BIT(31)
+
+#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
+#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
+#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
+
+#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+# define PLL_POST_DIV_SHIFT 8
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_ALPHA_EN BIT(24)
+# define PLL_ALPHA_MODE BIT(25)
+# define PLL_VCO_SHIFT 20
+# define PLL_VCO_MASK 0x3
+
+#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
+
+#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
+#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
+#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
+#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+
+const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_USER_CTL_U] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ [PLL_OFF_OPMODE] = 0x2c,
+ [PLL_OFF_FRAC] = 0x38,
+ },
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_REG_16BIT_WIDTH 16
+#define ALPHA_BITWIDTH 32U
+#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
+
+#define PLL_HUAYRA_M_WIDTH 8
+#define PLL_HUAYRA_M_SHIFT 8
+#define PLL_HUAYRA_M_MASK 0xff
+#define PLL_HUAYRA_N_SHIFT 0
+#define PLL_HUAYRA_N_MASK 0xff
+#define PLL_HUAYRA_ALPHA_WIDTH 16
+
+#define FABIA_OPMODE_STANDBY 0x0
+#define FABIA_OPMODE_RUN 0x1
+
+#define FABIA_PLL_OUT_MASK 0x7
+#define FABIA_PLL_RATE_MARGIN 500
+
+#define pll_alpha_width(p) \
+ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
+ ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
+
+#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
+
+#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll, clkr)
+
+#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll_postdiv, clkr)
+
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ for (count = 100; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ WARN(1, "%s failed to %s!\n", name, action);
+ return -ETIMEDOUT;
+}
+
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
+#define wait_for_pll_update(pll) \
+ wait_for_pll(pll, PLL_UPDATE, 1, "update")
+
+#define wait_for_pll_update_ack_set(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
+
+#define wait_for_pll_update_ack_clear(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+
+ if (pll_has_64bit_config(pll))
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (pll_alpha_width(pll) > 32)
+ regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+ val |= config->alpha_mode_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
+}
+EXPORT_SYMBOL_GPL(clk_alpha_pll_configure);
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
+static int clk_alpha_pll_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+ /* Skip if already enabled */
+ if ((val & mask) == mask)
+ return 0;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_BYPASSNL, PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_alpha_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ mask = PLL_OUTCTRL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mask = PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+}
+
+static unsigned long
+alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
+{
+ return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
+}
+
+static unsigned long
+alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
+ u32 alpha_width)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << ALPHA_SHIFT(alpha_width);
+
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ *a = quotient;
+ return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
+}
+
+static const struct pll_vco *
+alpha_pll_find_vco(const struct clk_alpha_pll *pll, unsigned long rate)
+{
+ const struct pll_vco *v = pll->vco_table;
+ const struct pll_vco *end = v + pll->num_vco;
+
+ for (; v < end; v++)
+ if (rate >= v->min_freq && rate <= v->max_freq)
+ return v;
+
+ return NULL;
+}
+
+static unsigned long
+clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, low, high, ctl;
+ u64 a = 0, prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (alpha_width > 32) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high);
+ a = (u64)high << 32 | low;
+ } else {
+ a = low & GENMASK(alpha_width - 1, 0);
+ }
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a >>= alpha_width - ALPHA_BITWIDTH;
+ }
+
+ return alpha_pll_calc_rate(prate, l, a, alpha_width);
+}
+
+
+static int __clk_alpha_pll_update_latch(struct clk_alpha_pll *pll)
+{
+ int ret;
+ u32 mode;
+
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
+
+ /* Latch the input to the PLL */
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
+ PLL_UPDATE);
+
+	/* Wait for 2 reference cycles before checking the ACK bit */
+ udelay(1);
+
+ /*
+ * PLL will latch the new L, Alpha and freq control word.
+ * PLL will respond by raising PLL_ACK_LATCH output when new programming
+ * has been latched in and PLL is being updated. When
+ * UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
+ * automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
+ */
+ if (mode & PLL_UPDATE_BYPASS) {
+ ret = wait_for_pll_update_ack_set(pll);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
+ } else {
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ }
+
+ ret = wait_for_pll_update_ack_clear(pll);
+ if (ret)
+ return ret;
+
+ /* Wait for PLL output to stabilize */
+ udelay(10);
+
+ return 0;
+}
+
+static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
+ int (*is_enabled)(struct clk_hw *))
+{
+ if (!is_enabled(&pll->clkr.hw) ||
+ !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ return 0;
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate,
+ int (*is_enabled)(struct clk_hw *))
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ vco = alpha_pll_find_vco(pll, rate);
+ if (pll->vco_table && !vco) {
+ pr_err("alpha pll not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
+ return clk_alpha_pll_update_latch(pll, is_enabled);
+}
+
+static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_is_enabled);
+}
+
+static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_hwfsm_is_enabled);
+}
+
+static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long min_freq, max_freq;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+static unsigned long
+alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
+{
+ /*
+	 * a contains the 16-bit alpha_val as a two's complement number in
+	 * the range [-0.5, 0.5).
+ */
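+	/*
+	 * Illustrative worked example (not in the original source, and
+	 * assuming a 19.2 MHz reference): with l = 52 and a = 0xc000
+	 * (-0.25 as a two's complement fraction), l is first decremented
+	 * to 51 and the fractional term adds 0.75 * 19.2 MHz, giving
+	 * (52 - 0.25) * 19.2 MHz = 993.6 MHz.
+	 */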
+ if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ l -= 1;
+
+ return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
+}
+
+static unsigned long
+alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
+ u32 *l, u32 *a)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ /*
+	 * alpha_val is a two's complement number in the range
+	 * [-0.5, 0.5), so if quotient >= 0.5 then increment the l value,
+	 * since the alpha value will be subtracted in this case.
+ */
+ if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ *l += 1;
+
+ *a = quotient;
+ return alpha_huayra_pll_calc_rate(prate, *l, *a);
+}
+
+static unsigned long
+alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u64 rate = parent_rate, tmp;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha = 0, ctl, alpha_m, alpha_n;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
+ /*
+		 * Depending upon alpha_mode, it can be treated as an M/N value
+		 * or as a two's complement number. When alpha_mode=1,
+		 * pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
+ *
+ * Fout=FIN*(L+(M/N))
+ *
+ * M is a signed number (-128 to 127) and N is unsigned
+ * (0 to 255). M/N has to be within +/-0.5.
+ *
+		 * When alpha_mode=0, it is a two's complement number in the
+ * range [-0.5, 0.5).
+ *
+ * Fout=FIN*(L+(alpha_val)/2^16)
+ *
+		 * where alpha_val is a two's complement number.
+ */
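+		/*
+		 * Illustrative worked example (not in the original source,
+		 * and assuming a 19.2 MHz reference): with l = 50 and
+		 * alpha = 0x0104 in M/N mode, M = 1 and N = 4, so
+		 * Fout = 19.2 MHz * (50 + 1/4) = 964.8 MHz, while
+		 * alpha = 0xff04 encodes M = -1 and gives
+		 * 19.2 MHz * (50 - 1/4) = 955.2 MHz.
+		 */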
+ if (!(ctl & PLL_ALPHA_MODE))
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+
+ alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
+ alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
+
+ rate *= l;
+ tmp = parent_rate;
+ if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
+ alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate -= tmp;
+ } else {
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate += tmp;
+ }
+
+ return rate;
+ }
+
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+}
+
+static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, a, ctl, cur_alpha = 0;
+
+ rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN)
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
+
+ /*
+	 * The Huayra PLL supports dynamic programming: the user can change
+	 * L_VAL without having to go through the power-on sequence.
+ */
+ if (clk_alpha_pll_is_enabled(hw)) {
+ if (cur_alpha != a) {
+ pr_err("clock needs to be gated %s\n",
+ clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+		/* Ensure the L_VAL write goes through before polling for lock. */
+ mb();
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (a == 0)
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, 0x0);
+ else
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
+
+ return 0;
+}
+
+static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 l, a;
+
+ return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+}
+
+const struct clk_ops clk_alpha_pll_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+
+const struct clk_ops clk_alpha_pll_huayra_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_huayra_recalc_rate,
+ .round_rate = alpha_pll_huayra_round_rate,
+ .set_rate = alpha_pll_huayra_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
+
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_hwfsm_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+static unsigned long
+clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK(pll);
+
+ return parent_rate >> fls(ctl);
+}
+
+static const struct clk_div_table clk_alpha_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { 0xf, 16 },
+ { }
+};
+
+static const struct clk_div_table clk_alpha_2bit_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { }
+};
+
+static long
+clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ const struct clk_div_table *table;
+
+ if (pll->width == 2)
+ table = clk_alpha_2bit_div_table;
+ else
+ table = clk_alpha_div_table;
+
+ return divider_round_rate(hw, rate, prate, table,
+ pll->width, CLK_DIVIDER_POWER_OF_TWO);
+}
+
+static long
+clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl, div;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= BIT(pll->width) - 1;
+ div = 1 << fls(ctl);
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int div;
+
+ /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+ div << PLL_POST_DIV_SHIFT);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
+ .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
+
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_FRAC(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ }
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+<<<<<<<
+=======
+EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
+>>>>>>>
+
+static int alpha_pll_fabia_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, opmode_val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+<<<<<<<
+ /* Read opmode value */
+ ret = regmap_read(pll->clkr.regmap, PLL_OPMODE(pll), &opmode_val);
+=======
+ ret = regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+	/* Skip if the PLL is already running */
+ if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ return 0;
+
+<<<<<<<
+ /* Disable PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+	/* Set Operation mode to STANDBY */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ /* PLL should be in STANDBY mode before continuing */
+ mb();
+
+ /* Bring PLL out of reset */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to RUN */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_RUN);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* Enable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL,
+ PLL_OUTCTRL);
+>>>>>>>
+}
+
+static void alpha_pll_fabia_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+<<<<<<<
+ /* Disable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Disable main outputs */
+<<<<<<<
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
+ 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Place the PLL in STANDBY */
+<<<<<<<
+ regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+=======
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return;
+>>>>>>>
+}
+
+static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+}
+
+static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long rrate;
+ int ret = 0;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+	 * Due to the limited number of bits for fractional rate programming,
+	 * the rounded-up rate could be marginally higher than the requested rate.
+ */
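+	/*
+	 * Illustrative note (not in the original source): with an assumed
+	 * 19.2 MHz reference and the 16-bit Fabia fractional field, the
+	 * rounding step is 19.2 MHz / 2^16, roughly 293 Hz, so a rounded-up
+	 * rate exceeds the request by less than FABIA_PLL_RATE_MARGIN (500).
+	 */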
+ if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .set_rate = alpha_pll_fabia_set_rate,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
+
+static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, div = 1, val;
+ int ret;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ val >>= pll->post_div_shift;
+ val &= BIT(pll->width) - 1;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / div);
+}
+
+static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ (BIT(pll->width) - 1) << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
diff --git a/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.2 b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.2
new file mode 100644
index 0000000..4765333
--- /dev/null
+++ b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.2
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+
+#define PLL_MODE(p) ((p)->offset + 0x0)
+# define PLL_OUTCTRL BIT(0)
+# define PLL_BYPASSNL BIT(1)
+# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
+# define PLL_LOCK_COUNT_SHIFT 8
+# define PLL_LOCK_COUNT_MASK 0x3f
+# define PLL_BIAS_COUNT_SHIFT 14
+# define PLL_BIAS_COUNT_MASK 0x3f
+# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
+# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_UPDATE BIT(22)
+# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_OFFLINE_ACK BIT(28)
+# define ALPHA_PLL_ACK_LATCH BIT(29)
+# define PLL_ACTIVE_FLAG BIT(30)
+# define PLL_LOCK_DET BIT(31)
+
+#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
+#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
+#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
+
+#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+# define PLL_POST_DIV_SHIFT 8
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_ALPHA_EN BIT(24)
+# define PLL_ALPHA_MODE BIT(25)
+# define PLL_VCO_SHIFT 20
+# define PLL_VCO_MASK 0x3
+
+#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
+
+#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
+#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
+#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
+#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+
+const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_USER_CTL_U] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ [PLL_OFF_OPMODE] = 0x2c,
+ [PLL_OFF_FRAC] = 0x38,
+ },
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_REG_16BIT_WIDTH 16
+#define ALPHA_BITWIDTH 32U
+#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
+
+#define PLL_HUAYRA_M_WIDTH 8
+#define PLL_HUAYRA_M_SHIFT 8
+#define PLL_HUAYRA_M_MASK 0xff
+#define PLL_HUAYRA_N_SHIFT 0
+#define PLL_HUAYRA_N_MASK 0xff
+#define PLL_HUAYRA_ALPHA_WIDTH 16
+
+#define FABIA_OPMODE_STANDBY 0x0
+#define FABIA_OPMODE_RUN 0x1
+
+#define FABIA_PLL_OUT_MASK 0x7
+#define FABIA_PLL_RATE_MARGIN 500
+
+#define pll_alpha_width(p) \
+ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
+ ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
+
+#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
+
+#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll, clkr)
+
+#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll_postdiv, clkr)
+
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ for (count = 100; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ WARN(1, "%s failed to %s!\n", name, action);
+ return -ETIMEDOUT;
+}
+
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
+#define wait_for_pll_update(pll) \
+ wait_for_pll(pll, PLL_UPDATE, 1, "update")
+
+#define wait_for_pll_update_ack_set(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
+
+#define wait_for_pll_update_ack_clear(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+
+ if (pll_has_64bit_config(pll))
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (pll_alpha_width(pll) > 32)
+ regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+ val |= config->alpha_mode_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
+}
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
+static int clk_alpha_pll_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+ /* Skip if already enabled */
+ if ((val & mask) == mask)
+ return 0;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_BYPASSNL, PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_alpha_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ mask = PLL_OUTCTRL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mask = PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+}
+
+static unsigned long
+alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
+{
+ return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
+}
+
+static unsigned long
+alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
+ u32 alpha_width)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << ALPHA_SHIFT(alpha_width);
+
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ *a = quotient;
+ return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
+}
+
+static const struct pll_vco *
+alpha_pll_find_vco(const struct clk_alpha_pll *pll, unsigned long rate)
+{
+ const struct pll_vco *v = pll->vco_table;
+ const struct pll_vco *end = v + pll->num_vco;
+
+ for (; v < end; v++)
+ if (rate >= v->min_freq && rate <= v->max_freq)
+ return v;
+
+ return NULL;
+}
+
+static unsigned long
+clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, low, high, ctl;
+ u64 a = 0, prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (alpha_width > 32) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high);
+ a = (u64)high << 32 | low;
+ } else {
+ a = low & GENMASK(alpha_width - 1, 0);
+ }
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a >>= alpha_width - ALPHA_BITWIDTH;
+ }
+
+ return alpha_pll_calc_rate(prate, l, a, alpha_width);
+}
+
+
+static int __clk_alpha_pll_update_latch(struct clk_alpha_pll *pll)
+{
+ int ret;
+ u32 mode;
+
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
+
+ /* Latch the input to the PLL */
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
+ PLL_UPDATE);
+
+	/* Wait for 2 reference cycles before checking the ACK bit */
+ udelay(1);
+
+ /*
+ * PLL will latch the new L, Alpha and freq control word.
+ * PLL will respond by raising PLL_ACK_LATCH output when new programming
+ * has been latched in and PLL is being updated. When
+ * UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
+ * automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
+ */
+ if (mode & PLL_UPDATE_BYPASS) {
+ ret = wait_for_pll_update_ack_set(pll);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
+ } else {
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ }
+
+ ret = wait_for_pll_update_ack_clear(pll);
+ if (ret)
+ return ret;
+
+ /* Wait for PLL output to stabilize */
+ udelay(10);
+
+ return 0;
+}
+
+static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
+ int (*is_enabled)(struct clk_hw *))
+{
+ if (!is_enabled(&pll->clkr.hw) ||
+ !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ return 0;
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate,
+ int (*is_enabled)(struct clk_hw *))
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ vco = alpha_pll_find_vco(pll, rate);
+ if (pll->vco_table && !vco) {
+ pr_err("alpha pll not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
+ return clk_alpha_pll_update_latch(pll, is_enabled);
+}
+
+static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_is_enabled);
+}
+
+static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_hwfsm_is_enabled);
+}
+
+static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long min_freq, max_freq;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+static unsigned long
+alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
+{
+ /*
+	 * a contains the 16-bit alpha_val as a two's complement number in
+	 * the range [-0.5, 0.5).
+ */
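+	/*
+	 * Illustrative worked example (not in the original source, and
+	 * assuming a 19.2 MHz reference): with l = 52 and a = 0xc000
+	 * (-0.25 as a two's complement fraction), l is first decremented
+	 * to 51 and the fractional term adds 0.75 * 19.2 MHz, giving
+	 * (52 - 0.25) * 19.2 MHz = 993.6 MHz.
+	 */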
+ if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ l -= 1;
+
+ return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
+}
+
+static unsigned long
+alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
+ u32 *l, u32 *a)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ /*
+	 * alpha_val is a two's complement number in the range
+	 * [-0.5, 0.5), so if quotient >= 0.5 then increment the l value,
+	 * since the alpha value will be subtracted in this case.
+ */
+ if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ *l += 1;
+
+ *a = quotient;
+ return alpha_huayra_pll_calc_rate(prate, *l, *a);
+}
+
+static unsigned long
+alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u64 rate = parent_rate, tmp;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha = 0, ctl, alpha_m, alpha_n;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
+ /*
+		 * Depending upon alpha_mode, it can be treated as an M/N value
+		 * or as a two's complement number. When alpha_mode=1,
+		 * pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
+ *
+ * Fout=FIN*(L+(M/N))
+ *
+ * M is a signed number (-128 to 127) and N is unsigned
+ * (0 to 255). M/N has to be within +/-0.5.
+ *
+		 * When alpha_mode=0, it is a two's complement number in the
+ * range [-0.5, 0.5).
+ *
+ * Fout=FIN*(L+(alpha_val)/2^16)
+ *
+		 * where alpha_val is a two's complement number.
+ */
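+		/*
+		 * Illustrative worked example (not in the original source,
+		 * and assuming a 19.2 MHz reference): with l = 50 and
+		 * alpha = 0x0104 in M/N mode, M = 1 and N = 4, so
+		 * Fout = 19.2 MHz * (50 + 1/4) = 964.8 MHz, while
+		 * alpha = 0xff04 encodes M = -1 and gives
+		 * 19.2 MHz * (50 - 1/4) = 955.2 MHz.
+		 */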
+ if (!(ctl & PLL_ALPHA_MODE))
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+
+ alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
+ alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
+
+ rate *= l;
+ tmp = parent_rate;
+ if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
+ alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate -= tmp;
+ } else {
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate += tmp;
+ }
+
+ return rate;
+ }
+
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+}
+
+static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, a, ctl, cur_alpha = 0;
+
+ rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN)
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
+
+ /*
+	 * The Huayra PLL supports dynamic programming: the user can change
+	 * L_VAL without having to go through the power-on sequence.
+ */
+ if (clk_alpha_pll_is_enabled(hw)) {
+ if (cur_alpha != a) {
+ pr_err("clock needs to be gated %s\n",
+ clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+		/* Ensure the L_VAL write goes through before polling for lock. */
+ mb();
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (a == 0)
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, 0x0);
+ else
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
+
+ return 0;
+}
+
+static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 l, a;
+
+ return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+}
+
+const struct clk_ops clk_alpha_pll_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+
+const struct clk_ops clk_alpha_pll_huayra_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_huayra_recalc_rate,
+ .round_rate = alpha_pll_huayra_round_rate,
+ .set_rate = alpha_pll_huayra_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
+
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_hwfsm_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+static unsigned long
+clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK(pll);
+
+ return parent_rate >> fls(ctl);
+}
+
+static const struct clk_div_table clk_alpha_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { 0xf, 16 },
+ { }
+};
+
+static const struct clk_div_table clk_alpha_2bit_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { }
+};
+
+static long
+clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ const struct clk_div_table *table;
+
+ if (pll->width == 2)
+ table = clk_alpha_2bit_div_table;
+ else
+ table = clk_alpha_div_table;
+
+ return divider_round_rate(hw, rate, prate, table,
+ pll->width, CLK_DIVIDER_POWER_OF_TWO);
+}
+
+static long
+clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl, div;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= BIT(pll->width) - 1;
+ div = 1 << fls(ctl);
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int div;
+
+ /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+ div << PLL_POST_DIV_SHIFT);
+}
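+
+/*
+ * Illustrative example with hypothetical rates: for an 800 MHz parent and a
+ * requested 200 MHz, the set_rate above computes
+ * DIV_ROUND_UP(800000000, 200000000) - 1 = 3, i.e. field value 0x3, matching
+ * the "4 -> 0x3" mapping in the comment. The recalc path then reads 0x3 back
+ * and returns parent_rate >> fls(0x3) = parent_rate >> 2 = 200 MHz.
+ */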
+
+const struct clk_ops clk_alpha_pll_postdiv_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
+ .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
+
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_FRAC(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ }
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+<<<<<<<
+=======
+EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
+>>>>>>>
+
+static int alpha_pll_fabia_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, opmode_val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+<<<<<<<
+ /* Read opmode value */
+ ret = regmap_read(pll->clkr.regmap, PLL_OPMODE(pll), &opmode_val);
+=======
+ ret = regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* Skip if the PLL is already running */
+ if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ return 0;
+
+<<<<<<<
+ /* Disable PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to STANDBY */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ /* PLL should be in STANDBY mode before continuing */
+ mb();
+
+ /* Bring PLL out of reset */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to RUN */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_RUN);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* Enable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL,
+ PLL_OUTCTRL);
+>>>>>>>
+}
+
+static void alpha_pll_fabia_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+<<<<<<<
+ /* Disable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Disable main outputs */
+<<<<<<<
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
+ 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Place the PLL in STANDBY */
+<<<<<<<
+ regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+=======
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return;
+>>>>>>>
+}
+
+static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+}
+
+static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long rrate;
+ int ret = 0;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+ * Due to the limited number of bits for fractional rate programming, the
+ * rounded-up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
+
+ return __clk_alpha_pll_update_latch(pll);
+}
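+
+/*
+ * Note on the margin check above: FABIA_PLL_RATE_MARGIN is 500 Hz, so the
+ * requested rate must be at most 500 Hz below what rounding produces. A
+ * consumer would typically round first, along the lines of this sketch
+ * (the 614.4 MHz target is only an example):
+ *
+ *	long r = clk_round_rate(clk, 614400000);
+ *
+ *	if (r > 0)
+ *		clk_set_rate(clk, r);
+ */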
+
+const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .set_rate = alpha_pll_fabia_set_rate,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
+
+static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, div = 1, val;
+ int ret;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ val >>= pll->post_div_shift;
+ val &= BIT(pll->width) - 1;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / div);
+}
+
+static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ (BIT(pll->width) - 1) << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
diff --git a/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.3 b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.3
new file mode 100644
index 0000000..f921d1f
--- /dev/null
+++ b/rr-cache/5a7fbb8eb469ab7e1359820fd91982b9b6c7f2a6/preimage.3
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+
+#define PLL_MODE(p) ((p)->offset + 0x0)
+# define PLL_OUTCTRL BIT(0)
+# define PLL_BYPASSNL BIT(1)
+# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
+# define PLL_LOCK_COUNT_SHIFT 8
+# define PLL_LOCK_COUNT_MASK 0x3f
+# define PLL_BIAS_COUNT_SHIFT 14
+# define PLL_BIAS_COUNT_MASK 0x3f
+# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
+# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_UPDATE BIT(22)
+# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_OFFLINE_ACK BIT(28)
+# define ALPHA_PLL_ACK_LATCH BIT(29)
+# define PLL_ACTIVE_FLAG BIT(30)
+# define PLL_LOCK_DET BIT(31)
+
+#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
+#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
+#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
+
+#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+# define PLL_POST_DIV_SHIFT 8
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_ALPHA_EN BIT(24)
+# define PLL_ALPHA_MODE BIT(25)
+# define PLL_VCO_SHIFT 20
+# define PLL_VCO_MASK 0x3
+
+#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
+
+#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
+#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
+#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
+#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+
+const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_USER_CTL_U] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_USER_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ [PLL_OFF_OPMODE] = 0x2c,
+ [PLL_OFF_FRAC] = 0x38,
+ },
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_REG_16BIT_WIDTH 16
+#define ALPHA_BITWIDTH 32U
+#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
+
+#define PLL_HUAYRA_M_WIDTH 8
+#define PLL_HUAYRA_M_SHIFT 8
+#define PLL_HUAYRA_M_MASK 0xff
+#define PLL_HUAYRA_N_SHIFT 0
+#define PLL_HUAYRA_N_MASK 0xff
+#define PLL_HUAYRA_ALPHA_WIDTH 16
+
+#define FABIA_OPMODE_STANDBY 0x0
+#define FABIA_OPMODE_RUN 0x1
+
+#define FABIA_PLL_OUT_MASK 0x7
+#define FABIA_PLL_RATE_MARGIN 500
+
+#define pll_alpha_width(p) \
+ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
+ ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
+
+#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
+
+#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll, clkr)
+
+#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll_postdiv, clkr)
+
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ for (count = 100; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ WARN(1, "%s failed to %s!\n", name, action);
+ return -ETIMEDOUT;
+}
+
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
+#define wait_for_pll_update(pll) \
+ wait_for_pll(pll, PLL_UPDATE, 1, "update")
+
+#define wait_for_pll_update_ack_set(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
+
+#define wait_for_pll_update_ack_clear(pll) \
+ wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+
+ if (pll_has_64bit_config(pll))
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (pll_alpha_width(pll) > 32)
+ regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+ val |= config->alpha_mode_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
+}
+EXPORT_SYMBOL_GPL(clk_alpha_pll_configure);
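+
+/*
+ * Sketch of how a platform driver might fill in the configuration consumed
+ * above; the names and numeric values are hypothetical and only meant to
+ * show which alpha_pll_config fields clk_alpha_pll_configure() looks at:
+ *
+ *	static const struct alpha_pll_config example_config = {
+ *		.l = 0x34,
+ *		.alpha = 0x15555556,
+ *		.config_ctl_val = 0x00004289,
+ *		.main_output_mask = BIT(0),
+ *		.vco_val = 0x0 << PLL_VCO_SHIFT,
+ *		.vco_mask = PLL_VCO_MASK << PLL_VCO_SHIFT,
+ *	};
+ *
+ *	clk_alpha_pll_configure(&example_pll, regmap, &example_config);
+ */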
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
+static int clk_alpha_pll_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+ /* Skip if already enabled */
+ if ((val & mask) == mask)
+ return 0;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_BYPASSNL, PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_alpha_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ mask = PLL_OUTCTRL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mask = PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
+}
+
+static unsigned long
+alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
+{
+ return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
+}
+
+static unsigned long
+alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
+ u32 alpha_width)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << ALPHA_SHIFT(alpha_width);
+
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ *a = quotient;
+ return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
+}
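+
+/*
+ * Worked example (illustrative, assuming a 19.2 MHz reference and a 40-bit
+ * alpha register, so ALPHA_SHIFT() evaluates to 32): for a requested rate of
+ * 1000000000 Hz, alpha_pll_round_rate() yields l = 52 (52 * 19200000 =
+ * 998400000) and a = DIV_ROUND_UP((u64)1600000 << 32, 19200000) =
+ * 0x15555556. Feeding those back into alpha_pll_calc_rate() gives
+ * 998400000 + ((19200000 * 0x15555556) >> 32) = 998400000 + 1600000 =
+ * 1000000000 Hz.
+ */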
+
+static const struct pll_vco *
+alpha_pll_find_vco(const struct clk_alpha_pll *pll, unsigned long rate)
+{
+ const struct pll_vco *v = pll->vco_table;
+ const struct pll_vco *end = v + pll->num_vco;
+
+ for (; v < end; v++)
+ if (rate >= v->min_freq && rate <= v->max_freq)
+ return v;
+
+ return NULL;
+}
+
+static unsigned long
+clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, low, high, ctl;
+ u64 a = 0, prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (alpha_width > 32) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high);
+ a = (u64)high << 32 | low;
+ } else {
+ a = low & GENMASK(alpha_width - 1, 0);
+ }
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a >>= alpha_width - ALPHA_BITWIDTH;
+ }
+
+ return alpha_pll_calc_rate(prate, l, a, alpha_width);
+}
+
+
+static int __clk_alpha_pll_update_latch(struct clk_alpha_pll *pll)
+{
+ int ret;
+ u32 mode;
+
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
+
+ /* Latch the input to the PLL */
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
+ PLL_UPDATE);
+
+ /* Wait for 2 reference cycles before checking ACK bit */
+ udelay(1);
+
+ /*
+ * PLL will latch the new L, Alpha and freq control word.
+ * PLL will respond by raising PLL_ACK_LATCH output when new programming
+ * has been latched in and PLL is being updated. When
+ * UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
+ * automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
+ */
+ if (mode & PLL_UPDATE_BYPASS) {
+ ret = wait_for_pll_update_ack_set(pll);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
+ } else {
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ }
+
+ ret = wait_for_pll_update_ack_clear(pll);
+ if (ret)
+ return ret;
+
+ /* Wait for PLL output to stabilize */
+ udelay(10);
+
+ return 0;
+}
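+
+/*
+ * Summarising the handshake implemented above (timing assumes the usual
+ * 19.2 MHz reference): software sets PLL_UPDATE to request latching of the
+ * new L/ALPHA words, waits roughly 1us (two reference cycles is ~104ns),
+ * then either polls ALPHA_PLL_ACK_LATCH and clears PLL_UPDATE itself (the
+ * PLL_UPDATE_BYPASS case) or waits for hardware to clear PLL_UPDATE. In both
+ * cases it finally waits for ALPHA_PLL_ACK_LATCH to drop and gives the
+ * output ~10us to settle.
+ */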
+
+static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
+ int (*is_enabled)(struct clk_hw *))
+{
+ if (!is_enabled(&pll->clkr.hw) ||
+ !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ return 0;
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate,
+ int (*is_enabled)(struct clk_hw *))
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ vco = alpha_pll_find_vco(pll, rate);
+ if (pll->vco_table && !vco) {
+ pr_err("alpha pll not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ a <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
+ return clk_alpha_pll_update_latch(pll, is_enabled);
+}
+
+static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_is_enabled);
+}
+
+static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __clk_alpha_pll_set_rate(hw, rate, prate,
+ clk_alpha_pll_hwfsm_is_enabled);
+}
+
+static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long min_freq, max_freq;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+static unsigned long
+alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
+{
+ /*
+ * a contains the 16-bit alpha_val as a two's complement number in the
+ * range [-0.5, 0.5).
+ */
+ if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ l -= 1;
+
+ return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
+}
+
+static unsigned long
+alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
+ u32 *l, u32 *a)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ /*
+ * alpha_val should be a two's complement number in the range
+ * [-0.5, 0.5), so if quotient >= 0.5 then increment the l value,
+ * since the alpha value will be subtracted in this case.
+ */
+ if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
+ *l += 1;
+
+ *a = quotient;
+ return alpha_huayra_pll_calc_rate(prate, *l, *a);
+}
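+
+/*
+ * Worked example (illustrative, 19.2 MHz reference): a request of
+ * 1012800000 Hz gives l = 52 and a fractional part of 0.75, so
+ * a = 0.75 * 2^16 = 0xc000. Since 0xc000 >= BIT(15), l is bumped to 53 and
+ * the 16-bit value is interpreted as -0.25 in two's complement, so
+ * alpha_huayra_pll_calc_rate() returns 19200000 * (53 - 0.25) =
+ * 1012800000 Hz.
+ */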
+
+static unsigned long
+alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u64 rate = parent_rate, tmp;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha = 0, ctl, alpha_m, alpha_n;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
+ /*
+ * Depending upon alpha_mode, the alpha value is treated either as an
+ * M/N value or as a two's complement number. When alpha_mode=1,
+ * pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
+ *
+ * Fout=FIN*(L+(M/N))
+ *
+ * M is a signed number (-128 to 127) and N is unsigned
+ * (0 to 255). M/N has to be within +/-0.5.
+ *
+ * When alpha_mode=0, it is a two's complement number in the
+ * range [-0.5, 0.5).
+ *
+ * Fout=FIN*(L+(alpha_val)/2^16)
+ *
+ * where alpha_val is a two's complement number.
+ */
+ if (!(ctl & PLL_ALPHA_MODE))
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+
+ alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
+ alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
+
+ rate *= l;
+ tmp = parent_rate;
+ if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
+ alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate -= tmp;
+ } else {
+ tmp *= alpha_m;
+ do_div(tmp, alpha_n);
+ rate += tmp;
+ }
+
+ return rate;
+ }
+
+ return alpha_huayra_pll_calc_rate(rate, l, alpha);
+}
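+
+/*
+ * Illustrative M/N example for the alpha_mode=1 branch above (hypothetical
+ * register values, 19.2 MHz reference): with L = 52 and PLL_ALPHA_VAL =
+ * 0x0103 (M = 1, N = 3) the rate is 19200000 * (52 + 1/3) = 1004800000 Hz;
+ * with PLL_ALPHA_VAL = 0xff04 (M = -1, N = 4) the M field reads back as
+ * 0xff >= BIT(7), so it is folded to 256 - 255 = 1 and subtracted, giving
+ * 19200000 * (52 - 1/4) = 993600000 Hz.
+ */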
+
+static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, a, ctl, cur_alpha = 0;
+
+ rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ if (ctl & PLL_ALPHA_EN)
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
+
+ /*
+ * The Huayra PLL supports dynamic programming: L_VAL can be changed
+ * without having to go through the power-on sequence.
+ */
+ if (clk_alpha_pll_is_enabled(hw)) {
+ if (cur_alpha != a) {
+ pr_err("clock needs to be gated %s\n",
+ clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ /* Ensure the write above completes so the L value change is detected. */
+ mb();
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (a == 0)
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, 0x0);
+ else
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
+
+ return 0;
+}
+
+static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 l, a;
+
+ return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+}
+
+const struct clk_ops clk_alpha_pll_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+
+const struct clk_ops clk_alpha_pll_huayra_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_huayra_recalc_rate,
+ .round_rate = alpha_pll_huayra_round_rate,
+ .set_rate = alpha_pll_huayra_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
+
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_hwfsm_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+static unsigned long
+clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK(pll);
+
+ return parent_rate >> fls(ctl);
+}
+
+static const struct clk_div_table clk_alpha_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { 0xf, 16 },
+ { }
+};
+
+static const struct clk_div_table clk_alpha_2bit_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { }
+};
+
+static long
+clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ const struct clk_div_table *table;
+
+ if (pll->width == 2)
+ table = clk_alpha_2bit_div_table;
+ else
+ table = clk_alpha_div_table;
+
+ return divider_round_rate(hw, rate, prate, table,
+ pll->width, CLK_DIVIDER_POWER_OF_TWO);
+}
+
+static long
+clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl, div;
+
+ regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= BIT(pll->width) - 1;
+ div = 1 << fls(ctl);
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int div;
+
+ /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+ div << PLL_POST_DIV_SHIFT);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
+ .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
+
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_FRAC(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ }
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+<<<<<<<
+=======
+EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
+>>>>>>>
+
+static int alpha_pll_fabia_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, opmode_val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+<<<<<<<
+ /* Read opmode value */
+ ret = regmap_read(pll->clkr.regmap, PLL_OPMODE(pll), &opmode_val);
+=======
+ ret = regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ /* Skip if the PLL is already running */
+ if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ return 0;
+
+<<<<<<<
+ /* Disable PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to STANDBY */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ /* PLL should be in STANDBY mode before continuing */
+ mb();
+
+ /* Bring PLL out of reset */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set Operation mode to RUN */
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_RUN);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+>>>>>>>
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+<<<<<<<
+ /* Enable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL,
+ PLL_OUTCTRL);
+>>>>>>>
+}
+
+static void alpha_pll_fabia_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+<<<<<<<
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+=======
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+<<<<<<<
+ /* Disable PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_OUTCTRL, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Disable main outputs */
+<<<<<<<
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, 0);
+=======
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
+ 0);
+>>>>>>>
+ if (ret)
+ return;
+
+ /* Place the PLL in STANDBY */
+<<<<<<<
+ regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+=======
+ ret = regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
+ FABIA_OPMODE_STANDBY);
+ if (ret)
+ return;
+>>>>>>>
+}
+
+static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+}
+
+static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long rrate;
+ int ret = 0;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+ * Due to the limited number of bits for fractional rate programming, the
+ * rounded-up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .set_rate = alpha_pll_fabia_set_rate,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
+
+static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, div = 1, val;
+ int ret;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ val >>= pll->post_div_shift;
+ val &= BIT(pll->width) - 1;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / div);
+}
+
+static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ (BIT(pll->width) - 1) << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
diff --git a/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/postimage b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/postimage
new file mode 100644
index 0000000..c33b3ca
--- /dev/null
+++ b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/postimage
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SND_SOC_QDSP6_COMMON) += q6dsp-common.o
+obj-$(CONFIG_SND_SOC_QDSP6_CORE) += q6core.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE) += q6afe.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE_DAI) += q6afe-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_ADM) += q6adm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ROUTING) += q6routing.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM) += q6asm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM_DAI) += q6asm-dai.o
diff --git a/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/preimage b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/preimage
new file mode 100644
index 0000000..117cffc
--- /dev/null
+++ b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/preimage
@@ -0,0 +1,15 @@
+obj-$(CONFIG_SND_SOC_QDSP6_COMMON) += q6dsp-common.o
+<<<<<<<
+obj-$(CONFIG_SND_SOC_QDSP6_AFE) += q6afe.o q6afe-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_ADM) += q6adm.o q6routing.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM) += q6asm.o q6asm-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_CORE) += q6core.o
+=======
+obj-$(CONFIG_SND_SOC_QDSP6_CORE) += q6core.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE) += q6afe.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE_DAI) += q6afe-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_ADM) += q6adm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ROUTING) += q6routing.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM) += q6asm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM_DAI) += q6asm-dai.o
+>>>>>>>
diff --git a/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/thisimage b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/thisimage
new file mode 100644
index 0000000..117cffc
--- /dev/null
+++ b/rr-cache/5be910894ae8db7b83cdac81cc58dc83c4671161/thisimage
@@ -0,0 +1,15 @@
+obj-$(CONFIG_SND_SOC_QDSP6_COMMON) += q6dsp-common.o
+<<<<<<<
+obj-$(CONFIG_SND_SOC_QDSP6_AFE) += q6afe.o q6afe-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_ADM) += q6adm.o q6routing.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM) += q6asm.o q6asm-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_CORE) += q6core.o
+=======
+obj-$(CONFIG_SND_SOC_QDSP6_CORE) += q6core.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE) += q6afe.o
+obj-$(CONFIG_SND_SOC_QDSP6_AFE_DAI) += q6afe-dai.o
+obj-$(CONFIG_SND_SOC_QDSP6_ADM) += q6adm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ROUTING) += q6routing.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM) += q6asm.o
+obj-$(CONFIG_SND_SOC_QDSP6_ASM_DAI) += q6asm-dai.o
+>>>>>>>
diff --git a/rr-cache/60b96944c00d3e22b751173a6dc3d18c9a24b7ee/preimage b/rr-cache/60b96944c00d3e22b751173a6dc3d18c9a24b7ee/preimage
new file mode 100644
index 0000000..a3e8038
--- /dev/null
+++ b/rr-cache/60b96944c00d3e22b751173a6dc3d18c9a24b7ee/preimage
@@ -0,0 +1,1341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+<<<<<<<
+#include <linux/gpio.h>
+#include <linux/interconnect.h>
+=======
+#include <linux/gpio/consumer.h>
+>>>>>>>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL 0x00
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define CMD_BME_VAL 0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_DBI_BASE_ADDR 0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL 0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG 0x8BC
+#define DBI_RO_WR_EN 1
+
+#define PERST_DELAY_US 1000
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *ref_clk;
+ struct clk *phy_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+ struct clk *iface;
+ struct clk *aux;
+ struct clk *master_bus;
+ struct clk *slave_bus;
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+ struct icc_path *path;
+};
+
+struct qcom_pcie_resources_2_4_0 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct reset_control *axi_m_reset;
+ struct reset_control *axi_s_reset;
+ struct reset_control *pipe_reset;
+ struct reset_control *axi_m_vmid_reset;
+ struct reset_control *axi_s_xpu_reset;
+ struct reset_control *parf_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *axi_m_sticky_reset;
+ struct reset_control *pipe_sticky_reset;
+ struct reset_control *pwr_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+ struct clk *iface;
+ struct clk *axi_m_clk;
+ struct clk *axi_s_clk;
+ struct clk *ahb_clk;
+ struct clk *aux_clk;
+ struct reset_control *rst[7];
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*post_deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
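+
+/*
+ * Each SoC generation supplies one of these ops tables; a hypothetical
+ * instance (the table name is illustrative, the callbacks are the 2.1.0
+ * handlers defined below) would look like:
+ *
+ *	static const struct qcom_pcie_ops ops_example = {
+ *		.get_resources = qcom_pcie_get_resources_2_1_0,
+ *		.init = qcom_pcie_init_2_1_0,
+ *		.deinit = qcom_pcie_deinit_2_1_0,
+ *		.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+ *	};
+ */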
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (dw_pcie_link_up(pci))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface_clk))
+ return PTR_ERR(res->iface_clk);
+
+ res->ref_clk = devm_clk_get(dev, "ref");
+
+ if (IS_ERR(res->ref_clk)) {
+ if (PTR_ERR(res->ref_clk) == -EPROBE_DEFER)
+ return PTR_ERR(res->ref_clk);
+
+ res->ref_clk = NULL;
+ }
+
+ res->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(res->core_clk))
+ return PTR_ERR(res->core_clk);
+
+ res->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+ if (IS_ERR(res->axi_reset))
+ return PTR_ERR(res->axi_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->ref_clk);
+ clk_disable_unprepare(res->core_clk);
+ clk_disable_unprepare(res->phy_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->ref_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ref clock\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->iface_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_phy;
+ }
+
+ ret = clk_prepare_enable(res->core_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_core;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_deassert_ahb;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val |= BIT(16);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->por_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert por reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->axi_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi reset\n");
+ return ret;
+ }
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+
+ /* Set the max TLP size to 2K, instead of using the default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+ return 0;
+
+err_deassert_ahb:
+ clk_disable_unprepare(res->core_clk);
+err_clk_core:
+ clk_disable_unprepare(res->phy_clk);
+err_clk_phy:
+ clk_disable_unprepare(res->iface_clk);
+err_clk_iface:
+ clk_disable_unprepare(res->ref_clk);
+err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->aux = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux))
+ return PTR_ERR(res->aux);
+
+ res->master_bus = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_bus))
+ return PTR_ERR(res->master_bus);
+
+ res->slave_bus = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_bus))
+ return PTR_ERR(res->slave_bus);
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_disable_unprepare(res->slave_bus);
+ clk_disable_unprepare(res->master_bus);
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->aux);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_res;
+ }
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master_bus clock\n");
+ goto err_iface;
+ }
+
+ ret = clk_prepare_enable(res->slave_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+ goto err_master;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_slave;
+ }
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_slave:
+ clk_disable_unprepare(res->slave_bus);
+err_master:
+ clk_disable_unprepare(res->master_bus);
+err_iface:
+ clk_disable_unprepare(res->iface);
+err_aux:
+ clk_disable_unprepare(res->aux);
+err_res:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->path = of_icc_get(dev, "ddr");
+ if (IS_ERR(res->path))
+ return PTR_ERR(res->path);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ icc_put(res->path);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_aux_clk;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
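+ /*
+ * Bandwidth vote on the ddr interconnect path acquired in
+ * qcom_pcie_get_resources_2_3_2(); the 500/800 arguments appear to be
+ * platform-specific average/peak bandwidth values.
+ */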
+ icc_set(res->path, 500, 800);
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->master_clk = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+ if (IS_ERR(res->axi_m_reset))
+ return PTR_ERR(res->axi_m_reset);
+
+ res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+ if (IS_ERR(res->axi_s_reset))
+ return PTR_ERR(res->axi_s_reset);
+
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+
+ res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_sticky");
+ if (IS_ERR(res->axi_m_sticky_reset))
+ return PTR_ERR(res->axi_m_sticky_reset);
+
+ res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "pipe_sticky");
+ if (IS_ERR(res->pipe_sticky_reset))
+ return PTR_ERR(res->pipe_sticky_reset);
+
+ res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+ if (IS_ERR(res->pwr_reset))
+ return PTR_ERR(res->pwr_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_assert(res->axi_m_reset);
+ reset_control_assert(res->axi_s_reset);
+ reset_control_assert(res->pipe_reset);
+ reset_control_assert(res->pipe_sticky_reset);
+ reset_control_assert(res->phy_reset);
+ reset_control_assert(res->phy_ahb_reset);
+ reset_control_assert(res->axi_m_sticky_reset);
+ reset_control_assert(res->pwr_reset);
+ reset_control_assert(res->ahb_reset);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = reset_control_assert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi slave reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy ahb reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ goto err_rst_phy;
+ }
+
+ ret = reset_control_deassert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe reset\n");
+ goto err_rst_pipe;
+ }
+
+ ret = reset_control_deassert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe sticky reset\n");
+ goto err_rst_pipe_sticky;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master reset\n");
+ goto err_rst_axi_m;
+ }
+
+ ret = reset_control_deassert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master sticky reset\n");
+ goto err_rst_axi_m_sticky;
+ }
+
+ ret = reset_control_deassert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi slave reset\n");
+ goto err_rst_axi_s;
+ }
+
+ ret = reset_control_deassert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert power reset\n");
+ goto err_rst_pwr;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_rst_ahb;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_clk_axi_s:
+ clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i;
+ const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+ "axi_m_sticky", "sticky",
+ "ahb", "sleep", };
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->axi_m_clk = devm_clk_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_clk))
+ return PTR_ERR(res->axi_m_clk);
+
+ res->axi_s_clk = devm_clk_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_clk))
+ return PTR_ERR(res->axi_s_clk);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+ res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+ if (IS_ERR(res->rst[i]))
+ return PTR_ERR(res->rst[i]);
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->axi_m_clk);
+ clk_disable_unprepare(res->axi_s_clk);
+ clk_disable_unprepare(res->ahb_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_assert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+ return ret;
+ }
+ }
+
+ usleep_range(2000, 2500);
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_deassert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Don't have a way to see if the reset has completed.
+ * Wait for some time.
+ */
+ usleep_range(2000, 2500);
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->axi_m_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->axi_s_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ahb clock\n");
+ goto err_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+ val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+ writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+ writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+ PCIE20_DEVICE_CONTROL2_STATUS2);
+
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+ /*
+ * Not checking for failure, will anyway return
+ * the original failure in 'ret'.
+ */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
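+ /* PCI_EXP_LNKSTA_DLLLA is the Data Link Layer Link Active bit of the Link Status register */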
+ u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+<<<<<<<
+ pm_runtime_get_sync(pci->dev);
+=======
+
+ pm_runtime_get_sync(pci->dev);
+
+>>>>>>>
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->ops->post_init) {
+ ret = pcie->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ qcom_ep_reset_deassert(pcie);
+
+ ret = qcom_pcie_establish_link(pcie);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ qcom_ep_reset_assert(pcie);
+ if (pcie->ops->post_deinit)
+ pcie->ops->post_deinit(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->ops->deinit(pcie);
+<<<<<<<
+ pm_runtime_put(pci->dev);
+=======
+ pm_runtime_put_sync(pci->dev);
+>>>>>>>
+
+ return ret;
+}
+
+static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ /* the device class is not reported correctly from the register */
+ if (where == PCI_CLASS_REVISION && size == 4) {
+ *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
+ *val &= 0xff; /* keep revision id */
+ *val |= PCI_CLASS_BRIDGE_PCI << 16;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .rd_own_conf = qcom_pcie_rd_own_conf,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .post_deinit = qcom_pcie_post_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->ops = of_device_get_match_data(dev);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ pcie->parf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->parf))
+ return PTR_ERR(pcie->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie->elbi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->elbi))
+ return PTR_ERR(pcie->elbi);
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ ret = pcie->ops->get_resources(pcie);
+ if (ret)
+ return ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &qcom_pcie_dw_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { }
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/postimage b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/postimage
new file mode 100644
index 0000000..06f03a5
--- /dev/null
+++ b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/postimage
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include "q6core.h"
+#include "q6dsp-errno.h"
+
+#define ADSP_STATE_READY_TIMEOUT_MS 3000
+#define Q6_READY_TIMEOUT_MS 100
+#define AVCS_CMD_ADSP_EVENT_GET_STATE 0x0001290C
+#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
+#define AVCS_GET_VERSIONS 0x00012905
+#define AVCS_GET_VERSIONS_RSP 0x00012906
+#define AVCS_CMD_GET_FWK_VERSION 0x001292c
+#define AVCS_CMDRSP_GET_FWK_VERSION 0x001292d
+
+struct avcs_svc_info {
+ uint32_t service_id;
+ uint32_t version;
+} __packed;
+
+struct avcs_cmdrsp_get_version {
+ uint32_t build_id;
+ uint32_t num_services;
+ struct avcs_svc_info svc_api_info[];
+} __packed;
+
+/* for ADSP2.8 and above */
+struct avcs_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+} __packed;
+
+struct avcs_cmdrsp_get_fwk_version {
+ uint32_t build_major_version;
+ uint32_t build_minor_version;
+ uint32_t build_branch_version;
+ uint32_t build_subbranch_version;
+ uint32_t num_services;
+ struct avcs_svc_api_info svc_api_info[];
+} __packed;
+
+struct q6core {
+ struct apr_device *adev;
+ wait_queue_head_t wait;
+ uint32_t avcs_state;
+ struct mutex lock;
+ bool resp_received;
+ uint32_t num_services;
+ struct avcs_cmdrsp_get_fwk_version *fwk_version;
+ struct avcs_cmdrsp_get_version *svc_version;
+ bool fwk_version_supported;
+ bool get_state_supported;
+ bool get_version_supported;
+ bool is_version_requested;
+};
+
+static struct q6core *g_core;
+
+static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct apr_hdr *hdr = &data->hdr;
+
+ result = data->payload;
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:{
+ result = data->payload;
+ switch (result->opcode) {
+ case AVCS_GET_VERSIONS:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->fwk_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_ADSP_EVENT_GET_STATE:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_state_supported = false;
+ core->resp_received = true;
+ break;
+ }
+ break;
+ }
+ case AVCS_CMDRSP_GET_FWK_VERSION: {
+ struct avcs_cmdrsp_get_fwk_version *fwk;
+ int bytes;
+
+ fwk = data->payload;
+ bytes = sizeof(*fwk) + fwk->num_services *
+ sizeof(fwk->svc_api_info[0]);
+
+ core->fwk_version = kzalloc(bytes, GFP_ATOMIC);
+ if (!core->fwk_version)
+ return -ENOMEM;
+
+ memcpy(core->fwk_version, data->payload, bytes);
+
+ core->fwk_version_supported = true;
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_GET_VERSIONS_RSP: {
+ struct avcs_cmdrsp_get_version *v;
+ int len;
+
+ v = data->payload;
+
+ len = sizeof(*v) + v->num_services * sizeof(v->svc_api_info[0]);
+
+ core->svc_version = kzalloc(len, GFP_ATOMIC);
+ if (!core->svc_version)
+ return -ENOMEM;
+
+ memcpy(core->svc_version, data->payload, len);
+
+ core->get_version_supported = true;
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
+ core->get_state_supported = true;
+ core->avcs_state = result->opcode;
+
+ core->resp_received = true;
+ break;
+ default:
+ dev_err(&adev->dev, "Message id from adsp core svc: 0x%x\n",
+ hdr->opcode);
+ break;
+ }
+
+ if (core->resp_received)
+ wake_up(&core->wait);
+
+ return 0;
+}
+
+static int q6core_get_fwk_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ rc = apr_send_pkt(adev, &pkt);
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+
+ if (!core->fwk_version_supported)
+ return -ENOTSUPP;
+ else
+ return 0;
+ }
+
+
+ return rc;
+}
+
+static int q6core_get_svc_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_GET_VERSIONS;
+
+ rc = apr_send_pkt(adev, &pkt);
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+ return 0;
+ }
+
+ return rc;
+}
+
+static bool __q6core_is_adsp_ready(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+ struct apr_pkt pkt;
+ int rc;
+
+ core->get_state_supported = false;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+ rc = apr_send_pkt(adev, &pkt);
+ if (rc < 0)
+ return false;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+
+ if (core->avcs_state)
+ return true;
+ }
+
+ /* assume that the adsp is up if it does not support this command */
+ if (!core->get_state_supported)
+ return true;
+
+ return false;
+}
+
+/**
+ * q6core_get_svc_api_info() - Get version number of a service.
+ *
+ * @svc_id: service id of the service.
+ * @ainfo: Valid struct pointer to fill svc api information.
+ *
+ * Return: zero on success and error code on failure or unsupported
+ */
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo)
+{
+ int i;
+ int ret = -ENOTSUPP;
+
+ if (!g_core || !ainfo)
+ return 0;
+
+ mutex_lock(&g_core->lock);
+ if (!g_core->is_version_requested) {
+ if (q6core_get_fwk_versions(g_core) == -ENOTSUPP)
+ q6core_get_svc_versions(g_core);
+ g_core->is_version_requested = true;
+ }
+
+ if (g_core->fwk_version_supported) {
+ for (i = 0; i < g_core->fwk_version->num_services; i++) {
+ struct avcs_svc_api_info *info;
+
+ info = &g_core->fwk_version->svc_api_info[i];
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->api_version;
+ ainfo->api_branch_version = info->api_branch_version;
+ ret = 0;
+ break;
+ }
+ } else if (g_core->get_version_supported) {
+ for (i = 0; i < g_core->svc_version->num_services; i++) {
+ struct avcs_svc_info *info;
+
+ info = &g_core->svc_version->svc_api_info[i];
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->version;
+ ainfo->api_branch_version = 0;
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6core_get_svc_api_info);
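+
+/*
+ * Example usage (illustrative sketch; APR_SVC_AFE from linux/soc/qcom/apr.h
+ * is assumed here purely as an example service id):
+ *
+ *	struct q6core_svc_api_info ainfo;
+ *
+ *	if (!q6core_get_svc_api_info(APR_SVC_AFE, &ainfo))
+ *		dev_info(dev, "AFE API version 0x%x\n", ainfo.api_version);
+ */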
+
+/**
+ * q6core_is_adsp_ready() - Get status of adsp
+ *
+ * Return: true if the adsp is ready and false if not.
+ */
+bool q6core_is_adsp_ready(void)
+{
+ unsigned long timeout;
+ bool ret = false;
+
+ if (!g_core)
+ return false;
+
+ mutex_lock(&g_core->lock);
+ timeout = jiffies + msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
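+ /* poll the ADSP until it reports ready or the 3 second timeout above expires */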
+ for (;;) {
+ if (__q6core_is_adsp_ready(g_core)) {
+ ret = true;
+ break;
+ }
+
+ if (!time_after(timeout, jiffies)) {
+ ret = false;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6core_is_adsp_ready);
+
+static int q6core_probe(struct apr_device *adev)
+{
+ g_core = kzalloc(sizeof(*g_core), GFP_KERNEL);
+ if (!g_core)
+ return -ENOMEM;
+
+ dev_set_drvdata(&adev->dev, g_core);
+
+ mutex_init(&g_core->lock);
+ g_core->adev = adev;
+ init_waitqueue_head(&g_core->wait);
+ return 0;
+}
+
+static int q6core_exit(struct apr_device *adev)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+
+ if (core->fwk_version_supported)
+ kfree(core->fwk_version);
+ if (core->get_version_supported)
+ kfree(core->svc_version);
+
+ g_core = NULL;
+ kfree(core);
+
+ return 0;
+}
+
+static const struct of_device_id q6core_device_id[] = {
+ { .compatible = "qcom,q6core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6core_device_id);
+
+static struct apr_driver qcom_q6core_driver = {
+ .probe = q6core_probe,
+ .remove = q6core_exit,
+ .callback = q6core_callback,
+ .driver = {
+ .name = "qcom-q6core",
+ .of_match_table = of_match_ptr(q6core_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6core_driver);
+MODULE_DESCRIPTION("q6 core");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/preimage b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/preimage
new file mode 100644
index 0000000..9850334
--- /dev/null
+++ b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/preimage
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+<<<<<<<
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include "q6core.h"
+>>>>>>>
+#include "q6dsp-errno.h"
+
+#define ADSP_STATE_READY_TIMEOUT_MS 3000
+#define Q6_READY_TIMEOUT_MS 100
+#define AVCS_CMD_ADSP_EVENT_GET_STATE 0x0001290C
+#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
+#define AVCS_GET_VERSIONS 0x00012905
+#define AVCS_GET_VERSIONS_RSP 0x00012906
+#define AVCS_CMD_GET_FWK_VERSION 0x001292c
+#define AVCS_CMDRSP_GET_FWK_VERSION 0x001292d
+
+<<<<<<<
+=======
+
+>>>>>>>
+struct avcs_svc_info {
+ uint32_t service_id;
+ uint32_t version;
+} __packed;
+
+struct avcs_cmdrsp_get_version {
+ uint32_t build_id;
+ uint32_t num_services;
+ struct avcs_svc_info svc_api_info[];
+} __packed;
+
+/* for ADSP2.8 and above */
+struct avcs_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+} __packed;
+
+struct avcs_cmdrsp_get_fwk_version {
+ uint32_t build_major_version;
+ uint32_t build_minor_version;
+ uint32_t build_branch_version;
+ uint32_t build_subbranch_version;
+ uint32_t num_services;
+ struct avcs_svc_api_info svc_api_info[];
+} __packed;
+
+struct q6core {
+ struct apr_device *adev;
+ wait_queue_head_t wait;
+ uint32_t avcs_state;
+<<<<<<<
+=======
+ struct mutex lock;
+>>>>>>>
+ bool resp_received;
+ uint32_t num_services;
+ struct avcs_cmdrsp_get_fwk_version *fwk_version;
+ struct avcs_cmdrsp_get_version *svc_version;
+ bool fwk_version_supported;
+ bool get_state_supported;
+ bool get_version_supported;
+<<<<<<<
+ bool is_version_requested;
+};
+
+static struct q6core *g_core;
+
+static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct apr_hdr *hdr = &data->hdr;
+
+ result = data->payload;
+ switch (hdr->opcode) {
+=======
+};
+
+struct q6core *g_core;
+
+static int q6core_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+
+ result = data->payload;
+ switch (data->opcode) {
+>>>>>>>
+ case APR_BASIC_RSP_RESULT:{
+ result = data->payload;
+ switch (result->opcode) {
+ case AVCS_GET_VERSIONS:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->fwk_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_ADSP_EVENT_GET_STATE:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_state_supported = false;
+ core->resp_received = true;
+ break;
+ }
+ break;
+ }
+ case AVCS_CMDRSP_GET_FWK_VERSION: {
+ struct avcs_cmdrsp_get_fwk_version *fwk;
+ int bytes;
+<<<<<<<
+ fwk = data->payload;
+
+ core->fwk_version_supported = true;
+=======
+
+ fwk = data->payload;
+>>>>>>>
+ bytes = sizeof(*fwk) + fwk->num_services *
+ sizeof(fwk->svc_api_info[0]);
+
+ core->fwk_version = kzalloc(bytes, GFP_ATOMIC);
+ if (!core->fwk_version)
+ return -ENOMEM;
+
+ memcpy(core->fwk_version, data->payload, bytes);
+
+<<<<<<<
+=======
+ core->fwk_version_supported = true;
+>>>>>>>
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_GET_VERSIONS_RSP: {
+ struct avcs_cmdrsp_get_version *v;
+ int len;
+<<<<<<<
+ v = data->payload;
+ core->get_version_supported = true;
+=======
+
+ v = data->payload;
+>>>>>>>
+
+ len = sizeof(*v) + v->num_services * sizeof(v->svc_api_info[0]);
+
+ core->svc_version = kzalloc(len, GFP_ATOMIC);
+ if (!core->svc_version)
+ return -ENOMEM;
+
+ memcpy(core->svc_version, data->payload, len);
+
+<<<<<<<
+=======
+ core->get_version_supported = true;
+>>>>>>>
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
+ core->get_state_supported = true;
+ core->avcs_state = result->opcode;
+
+ core->resp_received = true;
+ break;
+ default:
+ dev_err(&adev->dev, "Message id from adsp core svc: 0x%x\n",
+<<<<<<<
+ data->opcode);
+=======
+ hdr->opcode);
+>>>>>>>
+ break;
+ }
+
+ if (core->resp_received)
+ wake_up(&core->wait);
+
+ return 0;
+}
+
+static int q6core_get_fwk_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+ int rc;
+
+ core->fwk_version_supported = true;
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+<<<<<<<
+ return 0;
+=======
+
+ if (!core->fwk_version_supported)
+ return -ENOTSUPP;
+ else
+ return 0;
+>>>>>>>
+ }
+
+
+ return rc;
+}
+
+static int q6core_get_svc_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+ int rc;
+
+ core->get_version_supported = true;
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_GET_VERSIONS;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_GET_VERSIONS;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+ return 0;
+ }
+
+ return rc;
+}
+
+static bool __q6core_is_adsp_ready(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+=======
+ struct apr_pkt pkt;
+>>>>>>>
+ int rc;
+
+ core->get_state_supported = false;
+
+<<<<<<<
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return false;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+
+<<<<<<<
+ if (core->avcs_state == 0x1)
+=======
+ if (core->avcs_state)
+>>>>>>>
+ return true;
+ }
+
+ /* assume that the adsp is up if it does not support this command */
+ if (!core->get_state_supported)
+ return true;
+
+ return false;
+}
+
+/**
+<<<<<<<
+ * q6core_get_svc_api_info() - Get version number of a service.
+ *
+ * @svc_id: service id of the service.
+ * @ainfo: Valid struct pointer to fill svc api information.
+ *
+ * Return: zero on success and error code on failure or unsupported
+ */
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo)
+{
+ int i;
+ int ret = -ENOTSUPP;
+
+ if (!g_core || !ainfo)
+ return 0;
+
+ mutex_lock(&g_core->lock);
+ if (!g_core->is_version_requested) {
+ if (q6core_get_fwk_versions(g_core) == -ENOTSUPP)
+ q6core_get_svc_versions(g_core);
+ g_core->is_version_requested = true;
+ }
+
+=======
+ * q6core_get_svc_version() - Get version number of a service.
+ *
+ * @svc_id: service id of the service.
+ *
+ * Return: A valid version number on success and zero on failure.
+ * The returned version number contains the minor version in bits 0 to 15
+ * and the major version in bits 16 to 31.
+ */
+uint32_t q6core_get_svc_version(int svc_id)
+{
+ int i;
+
+ if (!g_core)
+ return 0;
+
+>>>>>>>
+ if (g_core->fwk_version_supported) {
+ for (i = 0; i < g_core->fwk_version->num_services; i++) {
+ struct avcs_svc_api_info *info;
+
+ info = &g_core->fwk_version->svc_api_info[i];
+<<<<<<<
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->api_version;
+ ainfo->api_branch_version = info->api_branch_version;
+ ret = 0;
+ break;
+=======
+ if (svc_id == info->service_id)
+ return info->api_version;
+>>>>>>>
+ }
+ } else if (g_core->get_version_supported) {
+ for (i = 0; i < g_core->svc_version->num_services; i++) {
+ struct avcs_svc_info *info;
+
+ info = &g_core->svc_version->svc_api_info[i];
+<<<<<<<
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->version;
+ ainfo->api_branch_version = 0;
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6core_get_svc_api_info);
+=======
+ pr_err("SVC API Info SVC-ID: %x vs %x API-VER: %x\n",
+ info->service_id, svc_id, info->version);
+
+ if (svc_id == info->service_id)
+ return info->version;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6core_get_svc_version);
+>>>>>>>
+
+/**
+ * q6core_is_adsp_ready() - Get status of adsp
+ *
+ * Return: true if the adsp is ready and false if not.
+ */
+bool q6core_is_adsp_ready(void)
+{
+ unsigned long timeout;
+<<<<<<<
+=======
+ bool ret = false;
+>>>>>>>
+
+ if (!g_core)
+ return false;
+
+<<<<<<<
+ mutex_lock(&g_core->lock);
+ timeout = jiffies + msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ for (;;) {
+ if (__q6core_is_adsp_ready(g_core)) {
+ ret = true;
+ break;
+ }
+
+ if (!time_after(timeout, jiffies)) {
+ ret = false;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+ return ret;
+=======
+ timeout = jiffies + msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ for (;;) {
+ if (__q6core_is_adsp_ready(g_core))
+ return true;
+
+ if (!time_after(timeout, jiffies))
+ return false;
+ }
+
+ return false;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6core_is_adsp_ready);
+
+static int q6core_probe(struct apr_device *adev)
+{
+ g_core = kzalloc(sizeof(*g_core), GFP_KERNEL);
+ if (!g_core)
+ return -ENOMEM;
+
+ dev_set_drvdata(&adev->dev, g_core);
+
+<<<<<<<
+ g_core->adev = adev;
+ init_waitqueue_head(&g_core->wait);
+
+ /* get adsp state */
+ if (q6core_is_adsp_ready()) {
+ q6core_get_fwk_versions(g_core);
+ if (!g_core->fwk_version_supported)
+ q6core_get_svc_versions(g_core);
+ }
+
+=======
+ mutex_init(&g_core->lock);
+ g_core->adev = adev;
+ init_waitqueue_head(&g_core->wait);
+>>>>>>>
+ return 0;
+}
+
+static int q6core_exit(struct apr_device *adev)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+
+ if (core->fwk_version_supported)
+ kfree(core->fwk_version);
+ if (core->get_version_supported)
+ kfree(core->svc_version);
+
+<<<<<<<
+ g_core = NULL;
+ kfree(core);
+=======
+ kfree(core);
+ g_core = NULL;
+>>>>>>>
+
+ return 0;
+}
+
+static const struct of_device_id q6core_device_id[] = {
+ { .compatible = "qcom,q6core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6core_device_id);
+
+static struct apr_driver qcom_q6core_driver = {
+ .probe = q6core_probe,
+ .remove = q6core_exit,
+ .callback = q6core_callback,
+ .driver = {
+ .name = "qcom-q6core",
+ .of_match_table = of_match_ptr(q6core_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6core_driver);
+MODULE_DESCRIPTION("q6 core");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/thisimage b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/thisimage
new file mode 100644
index 0000000..9850334
--- /dev/null
+++ b/rr-cache/651becc8f80dad1e588d533860a7befbceb8be94/thisimage
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+<<<<<<<
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include "q6core.h"
+>>>>>>>
+#include "q6dsp-errno.h"
+
+#define ADSP_STATE_READY_TIMEOUT_MS 3000
+#define Q6_READY_TIMEOUT_MS 100
+#define AVCS_CMD_ADSP_EVENT_GET_STATE 0x0001290C
+#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
+#define AVCS_GET_VERSIONS 0x00012905
+#define AVCS_GET_VERSIONS_RSP 0x00012906
+#define AVCS_CMD_GET_FWK_VERSION 0x001292c
+#define AVCS_CMDRSP_GET_FWK_VERSION 0x001292d
+
+<<<<<<<
+=======
+
+>>>>>>>
+struct avcs_svc_info {
+ uint32_t service_id;
+ uint32_t version;
+} __packed;
+
+struct avcs_cmdrsp_get_version {
+ uint32_t build_id;
+ uint32_t num_services;
+ struct avcs_svc_info svc_api_info[];
+} __packed;
+
+/* for ADSP2.8 and above */
+struct avcs_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+} __packed;
+
+struct avcs_cmdrsp_get_fwk_version {
+ uint32_t build_major_version;
+ uint32_t build_minor_version;
+ uint32_t build_branch_version;
+ uint32_t build_subbranch_version;
+ uint32_t num_services;
+ struct avcs_svc_api_info svc_api_info[];
+} __packed;
+
+struct q6core {
+ struct apr_device *adev;
+ wait_queue_head_t wait;
+ uint32_t avcs_state;
+<<<<<<<
+=======
+ struct mutex lock;
+>>>>>>>
+ bool resp_received;
+ uint32_t num_services;
+ struct avcs_cmdrsp_get_fwk_version *fwk_version;
+ struct avcs_cmdrsp_get_version *svc_version;
+ bool fwk_version_supported;
+ bool get_state_supported;
+ bool get_version_supported;
+<<<<<<<
+ bool is_version_requested;
+};
+
+static struct q6core *g_core;
+
+static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct apr_hdr *hdr = &data->hdr;
+
+ result = data->payload;
+ switch (hdr->opcode) {
+=======
+};
+
+struct q6core *g_core;
+
+static int q6core_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+
+ result = data->payload;
+ switch (data->opcode) {
+>>>>>>>
+ case APR_BASIC_RSP_RESULT:{
+ result = data->payload;
+ switch (result->opcode) {
+ case AVCS_GET_VERSIONS:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->fwk_version_supported = false;
+ core->resp_received = true;
+ break;
+ case AVCS_CMD_ADSP_EVENT_GET_STATE:
+ if (result->status == ADSP_EUNSUPPORTED)
+ core->get_state_supported = false;
+ core->resp_received = true;
+ break;
+ }
+ break;
+ }
+ case AVCS_CMDRSP_GET_FWK_VERSION: {
+ struct avcs_cmdrsp_get_fwk_version *fwk;
+ int bytes;
+<<<<<<<
+ fwk = data->payload;
+
+ core->fwk_version_supported = true;
+=======
+
+ fwk = data->payload;
+>>>>>>>
+ bytes = sizeof(*fwk) + fwk->num_services *
+ sizeof(fwk->svc_api_info[0]);
+
+ core->fwk_version = kzalloc(bytes, GFP_ATOMIC);
+ if (!core->fwk_version)
+ return -ENOMEM;
+
+ memcpy(core->fwk_version, data->payload, bytes);
+
+<<<<<<<
+=======
+ core->fwk_version_supported = true;
+>>>>>>>
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_GET_VERSIONS_RSP: {
+ struct avcs_cmdrsp_get_version *v;
+ int len;
+<<<<<<<
+ v = data->payload;
+ core->get_version_supported = true;
+=======
+
+ v = data->payload;
+>>>>>>>
+
+ len = sizeof(*v) + v->num_services * sizeof(v->svc_api_info[0]);
+
+ core->svc_version = kzalloc(len, GFP_ATOMIC);
+ if (!core->svc_version)
+ return -ENOMEM;
+
+ memcpy(core->svc_version, data->payload, len);
+
+<<<<<<<
+=======
+ core->get_version_supported = true;
+>>>>>>>
+ core->resp_received = true;
+
+ break;
+ }
+ case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
+ core->get_state_supported = true;
+ core->avcs_state = result->opcode;
+
+ core->resp_received = true;
+ break;
+ default:
+ dev_err(&adev->dev, "Message id from adsp core svc: 0x%x\n",
+<<<<<<<
+ data->opcode);
+=======
+ hdr->opcode);
+>>>>>>>
+ break;
+ }
+
+ if (core->resp_received)
+ wake_up(&core->wait);
+
+ return 0;
+}
+
+static int q6core_get_fwk_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+ int rc;
+
+ core->fwk_version_supported = true;
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+<<<<<<<
+ return 0;
+=======
+
+ if (!core->fwk_version_supported)
+ return -ENOTSUPP;
+ else
+ return 0;
+>>>>>>>
+ }
+
+
+ return rc;
+}
+
+static int q6core_get_svc_versions(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+ int rc;
+
+ core->get_version_supported = true;
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_GET_VERSIONS;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_GET_VERSIONS;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return rc;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+ return 0;
+ }
+
+ return rc;
+}
+
+static bool __q6core_is_adsp_ready(struct q6core *core)
+{
+ struct apr_device *adev = core->adev;
+<<<<<<<
+ struct apr_hdr hdr = {0};
+=======
+ struct apr_pkt pkt;
+>>>>>>>
+ int rc;
+
+ core->get_state_supported = false;
+
+<<<<<<<
+ hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr.pkt_size = APR_HDR_SIZE;
+ hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+ rc = apr_send_pkt(adev, &hdr);
+=======
+ pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pkt.hdr.pkt_size = APR_HDR_SIZE;
+ pkt.hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+ rc = apr_send_pkt(adev, &pkt);
+>>>>>>>
+ if (rc < 0)
+ return false;
+
+ rc = wait_event_timeout(core->wait, (core->resp_received),
+ msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+ if (rc > 0 && core->resp_received) {
+ core->resp_received = false;
+
+<<<<<<<
+ if (core->avcs_state == 0x1)
+=======
+ if (core->avcs_state)
+>>>>>>>
+ return true;
+ }
+
+ /* assume that the adsp is up if it does not support this command */
+ if (!core->get_state_supported)
+ return true;
+
+ return false;
+}
+
+/**
+<<<<<<<
+ * q6core_get_svc_api_info() - Get version number of a service.
+ *
+ * @svc_id: service id of the service.
+ * @ainfo: Valid struct pointer to fill svc api information.
+ *
+ * Return: zero on success and error code on failure or unsupported
+ */
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo)
+{
+ int i;
+ int ret = -ENOTSUPP;
+
+ if (!g_core || !ainfo)
+ return 0;
+
+ mutex_lock(&g_core->lock);
+ if (!g_core->is_version_requested) {
+ if (q6core_get_fwk_versions(g_core) == -ENOTSUPP)
+ q6core_get_svc_versions(g_core);
+ g_core->is_version_requested = true;
+ }
+
+=======
+ * q6core_get_svc_version() - Get version number of a service.
+ *
+ * @svc_id: service id of the service.
+ *
+ * Return: A valid version number on success and zero on failure.
+ * The returned version number contains the minor version in bits 0 to 15
+ * and the major version in bits 16 to 31.
+ */
+uint32_t q6core_get_svc_version(int svc_id)
+{
+ int i;
+
+ if (!g_core)
+ return 0;
+
+>>>>>>>
+ if (g_core->fwk_version_supported) {
+ for (i = 0; i < g_core->fwk_version->num_services; i++) {
+ struct avcs_svc_api_info *info;
+
+ info = &g_core->fwk_version->svc_api_info[i];
+<<<<<<<
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->api_version;
+ ainfo->api_branch_version = info->api_branch_version;
+ ret = 0;
+ break;
+=======
+ if (svc_id == info->service_id)
+ return info->api_version;
+>>>>>>>
+ }
+ } else if (g_core->get_version_supported) {
+ for (i = 0; i < g_core->svc_version->num_services; i++) {
+ struct avcs_svc_info *info;
+
+ info = &g_core->svc_version->svc_api_info[i];
+<<<<<<<
+ if (svc_id != info->service_id)
+ continue;
+
+ ainfo->api_version = info->version;
+ ainfo->api_branch_version = 0;
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6core_get_svc_api_info);
+=======
+ pr_err("SVC API Info SVC-ID: %x vs %x API-VER: %x\n",
+ info->service_id, svc_id, info->version);
+
+ if (svc_id == info->service_id)
+ return info->version;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6core_get_svc_version);
+>>>>>>>
+
+/**
+ * q6core_is_adsp_ready() - Get status of adsp
+ *
+ * Return: true if the adsp is ready and false if not.
+ */
+bool q6core_is_adsp_ready(void)
+{
+ unsigned long timeout;
+<<<<<<<
+=======
+ bool ret = false;
+>>>>>>>
+
+ if (!g_core)
+ return false;
+
+<<<<<<<
+ mutex_lock(&g_core->lock);
+ timeout = jiffies + msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ for (;;) {
+ if (__q6core_is_adsp_ready(g_core)) {
+ ret = true;
+ break;
+ }
+
+ if (!time_after(timeout, jiffies)) {
+ ret = false;
+ break;
+ }
+ }
+
+ mutex_unlock(&g_core->lock);
+ return ret;
+=======
+ timeout = jiffies + msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ for (;;) {
+ if (__q6core_is_adsp_ready(g_core))
+ return true;
+
+ if (!time_after(timeout, jiffies))
+ return false;
+ }
+
+ return false;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6core_is_adsp_ready);
+
+static int q6core_probe(struct apr_device *adev)
+{
+ g_core = kzalloc(sizeof(*g_core), GFP_KERNEL);
+ if (!g_core)
+ return -ENOMEM;
+
+ dev_set_drvdata(&adev->dev, g_core);
+
+<<<<<<<
+ g_core->adev = adev;
+ init_waitqueue_head(&g_core->wait);
+
+ /* get adsp state */
+ if (q6core_is_adsp_ready()) {
+ q6core_get_fwk_versions(g_core);
+ if (!g_core->fwk_version_supported)
+ q6core_get_svc_versions(g_core);
+ }
+
+=======
+ mutex_init(&g_core->lock);
+ g_core->adev = adev;
+ init_waitqueue_head(&g_core->wait);
+>>>>>>>
+ return 0;
+}
+
+static int q6core_exit(struct apr_device *adev)
+{
+ struct q6core *core = dev_get_drvdata(&adev->dev);
+
+ if (core->fwk_version_supported)
+ kfree(core->fwk_version);
+ if (core->get_version_supported)
+ kfree(core->svc_version);
+
+<<<<<<<
+ g_core = NULL;
+ kfree(core);
+=======
+ kfree(core);
+ g_core = NULL;
+>>>>>>>
+
+ return 0;
+}
+
+static const struct of_device_id q6core_device_id[] = {
+ { .compatible = "qcom,q6core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6core_device_id);
+
+static struct apr_driver qcom_q6core_driver = {
+ .probe = q6core_probe,
+ .remove = q6core_exit,
+ .callback = q6core_callback,
+ .driver = {
+ .name = "qcom-q6core",
+ .of_match_table = of_match_ptr(q6core_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6core_driver);
+MODULE_DESCRIPTION("q6 core");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/postimage b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/postimage
new file mode 100644
index 0000000..206945b
--- /dev/null
+++ b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/postimage
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Platform
+snd-soc-lpass-cpu-objs := lpass-cpu.o
+snd-soc-lpass-platform-objs := lpass-platform.o
+snd-soc-lpass-ipq806x-objs := lpass-ipq806x.o
+snd-soc-lpass-apq8016-objs := lpass-apq8016.o
+
+obj-$(CONFIG_SND_SOC_LPASS_CPU) += snd-soc-lpass-cpu.o
+obj-$(CONFIG_SND_SOC_LPASS_PLATFORM) += snd-soc-lpass-platform.o
+obj-$(CONFIG_SND_SOC_LPASS_IPQ806X) += snd-soc-lpass-ipq806x.o
+obj-$(CONFIG_SND_SOC_LPASS_APQ8016) += snd-soc-lpass-apq8016.o
+
+# Machine
+snd-soc-storm-objs := storm.o
+snd-soc-apq8016-sbc-objs := apq8016_sbc.o
+snd-soc-apq8096-objs := apq8096.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-apq8096.o
+
+#DSP lib
+obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
diff --git a/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/preimage b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/preimage
new file mode 100644
index 0000000..5f922b6
--- /dev/null
+++ b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/preimage
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Platform
+snd-soc-lpass-cpu-objs := lpass-cpu.o
+snd-soc-lpass-platform-objs := lpass-platform.o
+snd-soc-lpass-ipq806x-objs := lpass-ipq806x.o
+snd-soc-lpass-apq8016-objs := lpass-apq8016.o
+
+obj-$(CONFIG_SND_SOC_LPASS_CPU) += snd-soc-lpass-cpu.o
+obj-$(CONFIG_SND_SOC_LPASS_PLATFORM) += snd-soc-lpass-platform.o
+obj-$(CONFIG_SND_SOC_LPASS_IPQ806X) += snd-soc-lpass-ipq806x.o
+obj-$(CONFIG_SND_SOC_LPASS_APQ8016) += snd-soc-lpass-apq8016.o
+
+# Machine
+snd-soc-storm-objs := storm.o
+snd-soc-apq8016-sbc-objs := apq8016_sbc.o
+<<<<<<<
+snd-soc-apq8096-objs := apq8096.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-apq8096.o
+=======
+snd-soc-msm8996-objs := apq8096.o
+snd-soc-apq8064-objs := apq8064.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o
+obj-$(CONFIG_SND_SOC_APQ8064) += snd-soc-apq8064.o
+>>>>>>>
+
+#DSP lib
+obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
diff --git a/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/thisimage b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/thisimage
new file mode 100644
index 0000000..5f922b6
--- /dev/null
+++ b/rr-cache/65a8676cb8d5da7722e86a660c8e70588e6a3297/thisimage
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Platform
+snd-soc-lpass-cpu-objs := lpass-cpu.o
+snd-soc-lpass-platform-objs := lpass-platform.o
+snd-soc-lpass-ipq806x-objs := lpass-ipq806x.o
+snd-soc-lpass-apq8016-objs := lpass-apq8016.o
+
+obj-$(CONFIG_SND_SOC_LPASS_CPU) += snd-soc-lpass-cpu.o
+obj-$(CONFIG_SND_SOC_LPASS_PLATFORM) += snd-soc-lpass-platform.o
+obj-$(CONFIG_SND_SOC_LPASS_IPQ806X) += snd-soc-lpass-ipq806x.o
+obj-$(CONFIG_SND_SOC_LPASS_APQ8016) += snd-soc-lpass-apq8016.o
+
+# Machine
+snd-soc-storm-objs := storm.o
+snd-soc-apq8016-sbc-objs := apq8016_sbc.o
+<<<<<<<
+snd-soc-apq8096-objs := apq8096.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-apq8096.o
+=======
+snd-soc-msm8996-objs := apq8096.o
+snd-soc-apq8064-objs := apq8064.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o
+obj-$(CONFIG_SND_SOC_APQ8064) += snd-soc-apq8064.o
+>>>>>>>
+
+#DSP lib
+obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
diff --git a/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage b/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage
new file mode 100644
index 0000000..55dfaa6
--- /dev/null
+++ b/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage
@@ -0,0 +1,278 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
+config QCOM_RPMCC
+ bool
+
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config QCOM_A53PLL
+ tristate "MSM8916 A53 PLL"
+ depends on COMMON_CLK_QCOM
+ default ARCH_QCOM
+ help
+ Support for the A53 PLL on MSM8916 devices. It provides
+ the CPU with frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on MSM8916
+ devices.
+
+config QCOM_CLK_APCS_MSM8916
+ tristate "MSM8916 APCS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ Support for the APCS Clock Controller on msm8916 devices. The
+ APCS manages the mux and divider that feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as msm8916.
+
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
+config APQ_GCC_8084
+ tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8084 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+ tristate "APQ8084 Multimedia Clock Controller"
+ select APQ_GCC_8084
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on apq8084 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_GCC_806X
+ tristate "IPQ806x Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq806x devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_LCC_806X
+ tristate "IPQ806x LPASS Clock Controller"
+ select IPQ_GCC_806X
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on ipq806x devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ S/PDIF, etc.
+
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI, i2c, USB,
+ SD/eMMC, display, graphics, camera, etc.
+
+config MSM_GCC_8960
+ tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_LCC_8960
+ tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+<<<<<<<
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+=======
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc.
+ sdm845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+>>>>>>>
+
+config SPMI_PMIC_CLKDIV
+ tristate "SPMI PMIC clkdiv Support"
+ depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ help
+ This driver supports the clkdiv functionality on the Qualcomm
+ Technologies, Inc. SPMI PMIC. It configures the frequency of
+ clkdiv outputs of the PMIC. These clocks are typically wired
+ through alternate functions on GPIO pins.
+
+<<<<<<<
+=======
+config MSM_APCC_8996
+ tristate "MSM8996 CPU Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the CPU clock controller on msm8996 devices.
+ Say Y if you want to support CPU clock scaling using CPUfreq
+ drivers for dynamic power management.
+>>>>>>>
diff --git a/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage.1 b/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage.1
new file mode 100644
index 0000000..55dfaa6
--- /dev/null
+++ b/rr-cache/65c17a388bfd3e72fad64d277f9613b493c799f3/preimage.1
@@ -0,0 +1,278 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
+config QCOM_RPMCC
+ bool
+
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config QCOM_A53PLL
+ tristate "MSM8916 A53 PLL"
+ depends on COMMON_CLK_QCOM
+ default ARCH_QCOM
+ help
+ Support for the A53 PLL on MSM8916 devices. It provides
+ the CPU with frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on MSM8916
+ devices.
+
+config QCOM_CLK_APCS_MSM8916
+ tristate "MSM8916 APCS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ Support for the APCS Clock Controller on msm8916 devices. The
+ APCS manages the mux and divider that feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as msm8916.
+
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
+config APQ_GCC_8084
+ tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8084 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+ tristate "APQ8084 Multimedia Clock Controller"
+ select APQ_GCC_8084
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on apq8084 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_GCC_806X
+ tristate "IPQ806x Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq806x devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_LCC_806X
+ tristate "IPQ806x LPASS Clock Controller"
+ select IPQ_GCC_806X
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on ipq806x devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ S/PDIF, etc.
+
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI, i2c, USB,
+ SD/eMMC, display, graphics, camera, etc.
+
+config MSM_GCC_8960
+ tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_LCC_8960
+ tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+<<<<<<<
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+=======
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc.
+ sdm845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+>>>>>>>
+
+config SPMI_PMIC_CLKDIV
+ tristate "SPMI PMIC clkdiv Support"
+ depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ help
+ This driver supports the clkdiv functionality on the Qualcomm
+ Technologies, Inc. SPMI PMIC. It configures the frequency of
+ clkdiv outputs of the PMIC. These clocks are typically wired
+ through alternate functions on GPIO pins.
+
+<<<<<<<
+=======
+config MSM_APCC_8996
+ tristate "MSM8996 CPU Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the CPU clock controller on msm8996 devices.
+ Say Y if you want to support CPU clock scaling using CPUfreq
+ drivers for dyanmic power management.
+>>>>>>>
diff --git a/rr-cache/666e8cdd499373d4f426f6a9c73ff840683446b7/preimage b/rr-cache/666e8cdd499373d4f426f6a9c73ff840683446b7/preimage
new file mode 100644
index 0000000..54b7020
--- /dev/null
+++ b/rr-cache/666e8cdd499373d4f426f6a9c73ff840683446b7/preimage
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+<<<<<<<
+=======
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+>>>>>>>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+#include "common.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ goto err;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg)
+ return i;
+
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = clk_hw_get_name(hw);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ *          parent_rate     m
+ *   rate = ----------- x  ---
+ *            hid_div      n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+}
+
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg, mask;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + M_REG, mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!f)
+ return -EINVAL;
+
+ return clk_rcg2_configure(rcg, f);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ { 52, 295 }, /* 119 M */
+ { 11, 57 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 50 }, /* 148.50 M */
+ { 47, 206 }, /* 154 M */
+ { 31, 100 }, /* 205.25 M */
+ { 107, 269 }, /* 268.50 M */
+ { },
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ { 31, 211 }, /* 119 M */
+ { 32, 199 }, /* 130.25 M */
+ { 63, 307 }, /* 138.50 M */
+ { 11, 60 }, /* 148.50 M */
+ { 50, 263 }, /* 154 M */
+ { 31, 120 }, /* 205.25 M */
+ { 119, 359 }, /* 268.50 M */
+ { },
+};
+
+static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 src_rate = parent_rate;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_edp_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ const struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ /* Force the correct parent */
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ if (req->best_parent_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ for (; frac->num; frac++) {
+ request = req->rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ hid_div >>= CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+const struct clk_ops clk_edp_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_edp_pixel_set_rate,
+ .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
+ .determine_rate = clk_edp_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
+
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = *rcg->freq_tbl;
+ unsigned long div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Parent index is set statically in frequency table */
+ return clk_byte_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte_set_rate,
+ .set_rate_and_parent = clk_byte_set_rate_and_parent,
+ .determine_rate = clk_byte_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte_ops);
+
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
+static const struct frac_entry frac_table_pixel[] = {
+ { 3, 8 },
+ { 2, 9 },
+ { 4, 9 },
+ { 1, 1 },
+ { }
+};
+
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long request, src_rate;
+ int delta = 100000;
+ const struct frac_entry *frac = frac_table_pixel;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ const struct frac_entry *frac = frac_table_pixel;
+ unsigned long request;
+ int delta = 100000;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ &hid_div);
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return clk_rcg2_configure(rcg, &f);
+ }
+ return -EINVAL;
+}
+
+static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ return clk_pixel_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_pixel_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_pixel_set_rate,
+ .set_rate_and_parent = clk_pixel_set_rate_and_parent,
+ .determine_rate = clk_pixel_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * If the clock is disabled, update the CFG, M, N and D registers
+ * but don't hit the update bit of the CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because the required configuration has already
+ * been written by clk_rcg2_shared_set_rate().
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+
+ /*
+ * Store the current configuration, as switching to the safe source
+ * would clear the SRC and DIV fields of the CFG register.
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of safe source.
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
diff --git a/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/postimage.1 b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/postimage.1
new file mode 100644
index 0000000..ee40329
--- /dev/null
+++ b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/postimage.1
@@ -0,0 +1,1771 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+/* QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * Is this register part of the layout?
+ * If yes, the offset gives an index into the reg-layout table.
+ */
+ int in_layout;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = 1, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_PLL_LOCK_CHK_DLY_TIME,
+ QPHY_FLL_CNTRL1,
+ QPHY_FLL_CNTRL2,
+ QPHY_FLL_CNT_VAL_L,
+ QPHY_FLL_CNT_VAL_H_TOL,
+ QPHY_FLL_MAN_CODE,
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+};
+
+static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
+ [QPHY_FLL_CNTRL1] = 0xc4,
+ [QPHY_FLL_CNTRL2] = 0xc8,
+ [QPHY_FLL_CNT_VAL_L] = 0xcc,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+};
+
+static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_CNTRL1] = 0xc0,
+ [QPHY_FLL_CNTRL2] = 0xc4,
+ [QPHY_FLL_CNT_VAL_L] = 0xc8,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0xe),
+ QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
+ QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+	/* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+ bool has_phy_com_ctrl;
+ /* true, if PHY has a reset for individual lanes */
+ bool has_lane_rst;
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* Register offset of secondary tx/rx lanes for USB DP combo PHY */
+ unsigned int tx_b_lane_offset;
+ unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has a separate SW_RESET register */
+ bool has_sw_rst;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ */
+struct qmp_phy {
+ struct phy *phy;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @serdes: iomapped memory space for phy's serdes
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @cfg: phy specific configuration
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ * @phy_initialized: indicate if PHY has been initialized
+ * @mode: current PHY mode
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control **resets;
+ struct regulator_bulk_data *vregs;
+
+ const struct qmp_phy_cfg *cfg;
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+ bool phy_initialized;
+ enum phy_mode mode;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+	/* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+	/* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref_clk", "ref_aux_clk",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+/* list of regulators */
+static const char * const msm8996_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+ .has_lane_rst = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+ .has_sw_rst = true,
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+ .rx_tbl = ipq8074_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+ .clk_list = NULL,
+ .num_clks = 0,
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ufsphy_serdes_tbl),
+ .tx_tbl = ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ufsphy_tx_tbl),
+ .rx_tbl = ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ufsphy_rx_tbl),
+ .pcs_tbl = ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
+static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret, i;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_reg_enable;
+ }
+
+ for (i = 0; i < cfg->num_resets; i++) {
+ ret = reset_control_assert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset assert failed\n",
+ cfg->reset_list[i]);
+ goto err_rst_assert;
+ }
+ }
+
+ for (i = cfg->num_resets - 1; i >= 0; i--) {
+ ret = reset_control_deassert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset deassert failed\n",
+ qmp->cfg->reset_list[i]);
+ goto err_rst;
+ }
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ goto err_rst;
+ }
+
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+		/* bring the PCS blocks of both QMP USB and QMP DP PHYs out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ }
+
+ /* Serdes configuration */
+ qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ goto err_com_init;
+ }
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_com_init:
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+err_rst:
+ while (++i < cfg->num_resets)
+ reset_control_assert(qmp->resets[i]);
+err_rst_assert:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_reg_enable:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int i = cfg->num_resets;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ }
+
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+/* PHY Initialization */
+static int qcom_qmp_phy_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_com_init(qmp);
+ if (ret)
+ return ret;
+
+ if (cfg->has_lane_rst) {
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ goto err_lane_rst;
+ }
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ goto err_clk_enable;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+	/* Configuration for the other lane of the USB-DP combo PHY */
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num);
+
+ qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num);
+
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ /*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * This is an active-low enable signal to power down the PHY.
+ */
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ if (cfg->has_sw_rst)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (cfg->has_phy_dp_com_ctrl)
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
+ qmp->phy_initialized = true;
+
+ return ret;
+
+err_pcs_ready:
+ clk_disable_unprepare(qphy->pipe_clk);
+err_clk_enable:
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+err_lane_rst:
+ qcom_qmp_phy_com_exit(qmp);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ if (cfg->has_sw_rst)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_com_exit(qmp);
+
+ qmp->phy_initialized = false;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
+static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+	/* Clear any pending interrupt status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_vreg_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_reset_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+
+ qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_resets; i++) {
+ struct reset_control *rst;
+ const char *name = qmp->cfg->reset_list[i];
+
+ rst = devm_reset_control_get(dev, name);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get %s reset\n", name);
+ return PTR_ERR(rst);
+ }
+ qmp->resets[i] = rst;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_clk_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp->cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ if ((qmp->cfg->type != PHY_TYPE_USB3) &&
+ (qmp->cfg->type != PHY_TYPE_PCIE)) {
+ /* not all phys register pipe clocks, so return success */
+ return 0;
+ }
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
+
+static const struct phy_ops qcom_qmp_phy_gen_ops = {
+ .init = qcom_qmp_phy_init,
+ .exit = qcom_qmp_phy_exit,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; and
+ * pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ qphy->pcs_misc = of_iomap(np, 3);
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ /*
+	 * Get the PHY's pipe clock, if any. USB3 and PCIe are PIPE3-based
+	 * PHYs, so they always have a pipe clock; return an error if it is
+	 * missing for a USB3 or PCIe PHY. Otherwise, initialize the pipe
+	 * clock to NULL for PHYs that don't need it.
+ */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (qmp->cfg->type == PHY_TYPE_PCIE ||
+ qmp->cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ /* Get lane reset, if any */
+ if (qmp->cfg->has_lane_rst) {
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-pcie-phy",
+ .data = &ipq8074_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
+ .data = &qmp_v3_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
+ qcom_qmp_phy_runtime_resume, NULL)
+};
+
+static int qcom_qmp_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* per PHY serdes; usually located at base address */
+ qmp->serdes = base;
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (qmp->cfg->has_phy_dp_com_ctrl) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dp_com");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->dp_com = base;
+ }
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_clk_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_reset_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_vreg_init(dev);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > qmp->cfg->nlanes)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ id = 0;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_create(dev, child, id);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ /*
+ * Register the pipe clock provided by phy.
+ * See function description to see details of this pipe clock.
+ */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_qmp_phy_driver = {
+ .probe = qcom_qmp_phy_probe,
+ .driver = {
+ .name = "qcom-qmp-phy",
+ .pm = &qcom_qmp_phy_pm_ops,
+ .of_match_table = qcom_qmp_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage
new file mode 100644
index 0000000..c812971
--- /dev/null
+++ b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+/* QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+	 * Is the register part of the layout?
+	 * If yes, 'offset' gives the index into the reg-layout.
+ */
+ int in_layout;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = 1, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_PLL_LOCK_CHK_DLY_TIME,
+ QPHY_FLL_CNTRL1,
+ QPHY_FLL_CNTRL2,
+ QPHY_FLL_CNT_VAL_L,
+ QPHY_FLL_CNT_VAL_H_TOL,
+ QPHY_FLL_MAN_CODE,
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+};
+
+static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
+ [QPHY_FLL_CNTRL1] = 0xc4,
+ [QPHY_FLL_CNTRL2] = 0xc8,
+ [QPHY_FLL_CNT_VAL_L] = 0xcc,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+};
+
+static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_CNTRL1] = 0xc0,
+ [QPHY_FLL_CNTRL2] = 0xc4,
+ [QPHY_FLL_CNT_VAL_L] = 0xc8,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0xe),
+ QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
+ QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+<<<<<<<
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
+
+=======
+static const struct qmp_phy_init_tbl ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+	/* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
+
+>>>>>>>
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+ bool has_phy_com_ctrl;
+ /* true, if PHY has a reset for individual lanes */
+ bool has_lane_rst;
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* Register offset of secondary tx/rx lanes for USB DP combo PHY */
+ unsigned int tx_b_lane_offset;
+ unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has a separate SW_RESET register */
+ bool has_sw_rst;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ */
+struct qmp_phy {
+ struct phy *phy;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @serdes: iomapped memory space for phy's serdes
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @cfg: phy specific configuration
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ * @phy_initialized: indicate if PHY has been initialized
+ * @mode: current PHY mode
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control **resets;
+ struct regulator_bulk_data *vregs;
+
+ const struct qmp_phy_cfg *cfg;
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+ bool phy_initialized;
+ enum phy_mode mode;
+};
+
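+/*
+ * Read-modify-write helpers for PHY registers. The read-back after the
+ * write makes sure the write has actually reached the hardware before
+ * the caller moves on.
+ */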
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+	/* make sure the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+	/* make sure the above write has gone through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref_clk", "ref_aux_clk",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+/* list of regulators */
+static const char * const msm8996_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+ .has_lane_rst = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+ .has_sw_rst = true,
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+ .rx_tbl = ipq8074_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+ .clk_list = NULL,
+ .num_clks = 0,
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ufsphy_serdes_tbl),
+ .tx_tbl = ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ufsphy_tx_tbl),
+ .rx_tbl = ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ufsphy_rx_tbl),
+ .pcs_tbl = ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
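+/*
+ * Program a PHY init table into the given block. Entries flagged with
+ * in_layout are looked up through the per-PHY regs[] layout; all other
+ * entries use their offset directly as a register offset.
+ */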
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
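+/*
+ * One-time initialization of the blocks shared by all lanes: regulators,
+ * resets, clocks, the serdes table and, where present, the PHY common and
+ * DP_COM control blocks. The work is refcounted under phy_mutex, so only
+ * the first caller actually performs it.
+ */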
+static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret, i;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_reg_enable;
+ }
+
+ for (i = 0; i < cfg->num_resets; i++) {
+ ret = reset_control_assert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset assert failed\n",
+ cfg->reset_list[i]);
+ goto err_rst_assert;
+ }
+ }
+
+ for (i = cfg->num_resets - 1; i >= 0; i--) {
+ ret = reset_control_deassert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset deassert failed\n",
+ qmp->cfg->reset_list[i]);
+ goto err_rst;
+ }
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ goto err_rst;
+ }
+
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ }
+
+ /* Serdes configuration */
+ qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ goto err_com_init;
+ }
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_com_init:
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+err_rst:
+ while (++i < cfg->num_resets)
+ reset_control_assert(qmp->resets[i]);
+err_rst_assert:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_reg_enable:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int i = cfg->num_resets;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ }
+
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+/* PHY Initialization */
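+/*
+ * Per-lane bring-up: deassert the lane reset and enable the pipe clock,
+ * program the tx/rx/pcs init tables, take the PHY out of power-down and
+ * reset, start the serdes/PCS and poll the PCS status register until the
+ * PHY reports readiness.
+ */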
+static int qcom_qmp_phy_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_com_init(qmp);
+ if (ret)
+ return ret;
+
+ if (cfg->has_lane_rst) {
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ goto err_lane_rst;
+ }
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ goto err_clk_enable;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+	/* Secondary (lane B) configuration for the USB-DP combo PHY */
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num);
+
+ qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num);
+
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ /*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * The power-down control is an active-low enable signal.
+ */
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ if (cfg->has_sw_rst)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (cfg->has_phy_dp_com_ctrl)
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
+ qmp->phy_initialized = true;
+
+ return ret;
+
+err_pcs_ready:
+ clk_disable_unprepare(qphy->pipe_clk);
+err_clk_enable:
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+err_lane_rst:
+ qcom_qmp_phy_com_exit(qmp);
+
+ return ret;
+}
+
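+/*
+ * Tear-down mirror of qcom_qmp_phy_init(): stop the pipe clock, put the
+ * PCS back into reset, stop the serdes/PCS and power the PHY down, assert
+ * the lane reset and release the shared common block.
+ */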
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ if (cfg->has_sw_rst)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_com_exit(qmp);
+
+ qmp->phy_initialized = false;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
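+/*
+ * Autonomous mode is used across runtime suspend: receiver-detect (and,
+ * for USB SS host/device modes, LFPS detect) interrupts stay armed and
+ * the i/o clamp_n is enabled so the PHY can flag a wakeup while its
+ * clocks are turned off.
+ */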
+static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
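+/*
+ * Runtime PM hooks, effective only for USB3 phys: suspend arms autonomous
+ * mode and gates the pipe and bulk clocks, resume re-enables the clocks
+ * and returns the PHY to normal operation.
+ */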
+static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
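+/*
+ * The three helpers below turn the name lists from qmp_phy_cfg into
+ * regulator, reset-controller and clock handles using the devm APIs.
+ */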
+static int qcom_qmp_phy_vreg_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_reset_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+
+ qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_resets; i++) {
+ struct reset_control *rst;
+ const char *name = qmp->cfg->reset_list[i];
+
+ rst = devm_reset_control_get(dev, name);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get %s reset\n", name);
+ return PTR_ERR(rst);
+ }
+ qmp->resets[i] = rst;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_clk_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp->cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * and controls it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ if ((qmp->cfg->type != PHY_TYPE_USB3) &&
+ (qmp->cfg->type != PHY_TYPE_PCIE)) {
+ /* not all phys register pipe clocks, so return success */
+ return 0;
+ }
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
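+
+/*
+ * Purely illustrative example of the child-node property consumed by
+ * phy_pipe_clk_register() above; the actual name is board/SoC specific
+ * and comes from the DT bindings, e.g. something like:
+ *
+ *	clock-output-names = "usb3_phy_pipe_clk_src";
+ */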
+
+static const struct phy_ops qcom_qmp_phy_gen_ops = {
+ .init = qcom_qmp_phy_init,
+ .exit = qcom_qmp_phy_exit,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
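+/*
+ * Create one per-lane phy from a child node: map the lane's tx, rx, pcs
+ * and optional pcs_misc regions, get its pipe clock and lane reset where
+ * the config requires them, and register a generic phy for it.
+ */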
+static
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; and
+ * pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ qphy->pcs_misc = of_iomap(np, 3);
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ /*
+	 * Get the PHY's pipe clock, if any. USB3 and PCIe are PIPE3-based
+	 * phys, so they always have a pipe clock; for those types a missing
+	 * pipe clock is treated as an error. For all other phys the pipe
+	 * clock is initialized to NULL, since they don't need one.
+ */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (qmp->cfg->type == PHY_TYPE_PCIE ||
+ qmp->cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ /* Get lane reset, if any */
+ if (qmp->cfg->has_lane_rst) {
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-pcie-phy",
+ .data = &ipq8074_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+<<<<<<<
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+=======
+ .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
+ .data = &qmp_v3_usb3_uniphy_cfg,
+>>>>>>>
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
+ qcom_qmp_phy_runtime_resume, NULL)
+};
+
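+/*
+ * Probe: map the serdes (and dp_com, when used) regions, look up clocks,
+ * resets and regulators, then create a per-lane phy and register a pipe
+ * clock for every available child node before registering the provider.
+ */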
+static int qcom_qmp_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* per PHY serdes; usually located at base address */
+ qmp->serdes = base;
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (qmp->cfg->has_phy_dp_com_ctrl) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dp_com");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->dp_com = base;
+ }
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_clk_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_reset_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_vreg_init(dev);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > qmp->cfg->nlanes)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ id = 0;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_create(dev, child, id);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ /*
+ * Register the pipe clock provided by phy.
+ * See function description to see details of this pipe clock.
+ */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_qmp_phy_driver = {
+ .probe = qcom_qmp_phy_probe,
+ .driver = {
+ .name = "qcom-qmp-phy",
+ .pm = &qcom_qmp_phy_pm_ops,
+ .of_match_table = qcom_qmp_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage.1 b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage.1
new file mode 100644
index 0000000..c812971
--- /dev/null
+++ b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/preimage.1
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+/* QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+	 * Is this register part of the reg-layout? If so, 'offset' is an
+	 * index into the layout array rather than a raw register offset.
+ */
+ int in_layout;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = 1, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_PLL_LOCK_CHK_DLY_TIME,
+ QPHY_FLL_CNTRL1,
+ QPHY_FLL_CNTRL2,
+ QPHY_FLL_CNT_VAL_L,
+ QPHY_FLL_CNT_VAL_H_TOL,
+ QPHY_FLL_MAN_CODE,
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+};
+
+static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
+ [QPHY_FLL_CNTRL1] = 0xc4,
+ [QPHY_FLL_CNTRL2] = 0xc8,
+ [QPHY_FLL_CNT_VAL_L] = 0xcc,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+};
+
+static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_CNTRL1] = 0xc0,
+ [QPHY_FLL_CNTRL2] = 0xc4,
+ [QPHY_FLL_CNT_VAL_L] = 0xc8,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0xe),
+ QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
+ QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+<<<<<<<
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
+
+=======
+static const struct qmp_phy_init_tbl ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+ /*Rate B*/
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
+
+>>>>>>>
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+ bool has_phy_com_ctrl;
+ /* true, if PHY has a reset for individual lanes */
+ bool has_lane_rst;
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* Register offset of secondary tx/rx lanes for USB DP combo PHY */
+ unsigned int tx_b_lane_offset;
+ unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has a separate SW_RESET register */
+ bool has_sw_rst;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ */
+struct qmp_phy {
+ struct phy *phy;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @serdes: iomapped memory space for phy's serdes
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @cfg: phy specific configuration
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ * @phy_initialized: indicate if PHY has been initialized
+ * @mode: current PHY mode
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control **resets;
+ struct regulator_bulk_data *vregs;
+
+ const struct qmp_phy_cfg *cfg;
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+ bool phy_initialized;
+ enum phy_mode mode;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref_clk", "ref_aux_clk",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+/* list of regulators */
+static const char * const msm8996_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+ .has_lane_rst = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+ .has_sw_rst = true,
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+ .rx_tbl = ipq8074_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+ .clk_list = NULL,
+ .num_clks = 0,
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ufsphy_serdes_tbl),
+ .tx_tbl = ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ufsphy_tx_tbl),
+ .rx_tbl = ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ufsphy_rx_tbl),
+ .pcs_tbl = ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
+static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret, i;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_reg_enable;
+ }
+
+ for (i = 0; i < cfg->num_resets; i++) {
+ ret = reset_control_assert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset assert failed\n",
+ cfg->reset_list[i]);
+ goto err_rst_assert;
+ }
+ }
+
+ for (i = cfg->num_resets - 1; i >= 0; i--) {
+ ret = reset_control_deassert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset deassert failed\n",
+ qmp->cfg->reset_list[i]);
+ goto err_rst;
+ }
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ goto err_rst;
+ }
+
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+		/* bring both the QMP USB and QMP DP PHY PCS blocks out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ }
+
+ /* Serdes configuration */
+ qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ goto err_com_init;
+ }
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_com_init:
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+err_rst:
+ while (++i < cfg->num_resets)
+ reset_control_assert(qmp->resets[i]);
+err_rst_assert:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_reg_enable:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int i = cfg->num_resets;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ }
+
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
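+/*
+ * Note that the common block handling above is shared by all lanes:
+ * qcom_qmp_phy_com_init() and qcom_qmp_phy_com_exit() are refcounted through
+ * qmp->init_count under phy_mutex, so regulators, resets, clocks and the
+ * serdes are brought up only for the first lane initialized and torn down
+ * only when the last initialized lane exits.
+ */
+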
+/* PHY Initialization */
+static int qcom_qmp_phy_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_com_init(qmp);
+ if (ret)
+ return ret;
+
+ if (cfg->has_lane_rst) {
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ goto err_lane_rst;
+ }
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ goto err_clk_enable;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+	/* Configuration for the second lane of the USB-DP combo PHY */
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num);
+
+ qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num);
+
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ /*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * The power-down control signal is active low.
+ */
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ if (cfg->has_sw_rst)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (cfg->has_phy_dp_com_ctrl)
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
+ qmp->phy_initialized = true;
+
+ return ret;
+
+err_pcs_ready:
+ clk_disable_unprepare(qphy->pipe_clk);
+err_clk_enable:
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+err_lane_rst:
+ qcom_qmp_phy_com_exit(qmp);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ if (cfg->has_sw_rst)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_com_exit(qmp);
+
+ qmp->phy_initialized = false;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
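+/*
+ * A hypothetical consumer-side sketch (not taken from this file): a USB
+ * controller glue driver reports its mode through the generic PHY API, which
+ * lands in qcom_qmp_phy_set_mode() above and lets the runtime-suspend path
+ * below arm the matching wakeup interrupts ("qmp_phy" and "dev" here are the
+ * consumer's own handles):
+ *
+ *	ret = phy_set_mode(qmp_phy, PHY_MODE_USB_HOST_SS);
+ *	if (ret)
+ *		dev_err(dev, "failed to set PHY mode: %d\n", ret);
+ */
+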
+static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+	/* Clear any pending interrupt status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_vreg_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_reset_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+
+ qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_resets; i++) {
+ struct reset_control *rst;
+ const char *name = qmp->cfg->reset_list[i];
+
+ rst = devm_reset_control_get(dev, name);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get %s reset\n", name);
+ return PTR_ERR(rst);
+ }
+ qmp->resets[i] = rst;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_clk_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp->cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ if ((qmp->cfg->type != PHY_TYPE_USB3) &&
+ (qmp->cfg->type != PHY_TYPE_PCIE)) {
+ /* not all phys register pipe clocks, so return success */
+ return 0;
+ }
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
+
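+/*
+ * An illustrative, hypothetical devicetree fragment for a lane child node
+ * providing the property parsed above (node and clock names are platform
+ * specific):
+ *
+ *	phy@200 {
+ *		reg = <...>;
+ *		#clock-cells = <0>;
+ *		clock-output-names = "usb3_phy_pipe_clk_src";
+ *	};
+ *
+ * The GCC driver parents the gated <s>_pipe_clk to this fixed-rate
+ * <s>_pipe_clksrc, and the PHY driver requests the gated clock back under
+ * the "pipe<n>" name in qcom_qmp_phy_create() below.
+ */
+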
+static const struct phy_ops qcom_qmp_phy_gen_ops = {
+ .init = qcom_qmp_phy_init,
+ .exit = qcom_qmp_phy_exit,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; and
+ * pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ qphy->pcs_misc = of_iomap(np, 3);
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ /*
+	 * Get the PHY's pipe clock, if any. USB3 and PCIe are PIPE3-based
+	 * PHYs and therefore require a pipe clock, so a missing clock is an
+	 * error for those types. For all other PHYs the pipe clock is not
+	 * needed and is initialized to NULL.
+ */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (qmp->cfg->type == PHY_TYPE_PCIE ||
+ qmp->cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ /* Get lane reset, if any */
+ if (qmp->cfg->has_lane_rst) {
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-pcie-phy",
+ .data = &ipq8074_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+<<<<<<<
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+=======
+ .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
+ .data = &qmp_v3_usb3_uniphy_cfg,
+>>>>>>>
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
+ qcom_qmp_phy_runtime_resume, NULL)
+};
+
+static int qcom_qmp_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* per PHY serdes; usually located at base address */
+ qmp->serdes = base;
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (qmp->cfg->has_phy_dp_com_ctrl) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dp_com");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->dp_com = base;
+ }
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_clk_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_reset_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_vreg_init(dev);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+ /* do we have a rogue child node ? */
+ if (num > qmp->cfg->nlanes)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ id = 0;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_create(dev, child, id);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ /*
+ * Register the pipe clock provided by phy.
+		 * See the phy_pipe_clk_register() description for details.
+ */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_qmp_phy_driver = {
+ .probe = qcom_qmp_phy_probe,
+ .driver = {
+ .name = "qcom-qmp-phy",
+ .pm = &qcom_qmp_phy_pm_ops,
+ .of_match_table = qcom_qmp_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/thisimage.1 b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/thisimage.1
new file mode 100644
index 0000000..c812971
--- /dev/null
+++ b/rr-cache/722f55cef1394c13414ff3d97a7f9594145c9179/thisimage.1
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+/* QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0: HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0: HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+	/*
+	 * Is this register part of the reg layout?
+	 * If so, 'offset' is an index into the layout array.
+	 */
+ int in_layout;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = 1, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_PLL_LOCK_CHK_DLY_TIME,
+ QPHY_FLL_CNTRL1,
+ QPHY_FLL_CNTRL2,
+ QPHY_FLL_CNT_VAL_L,
+ QPHY_FLL_CNT_VAL_H_TOL,
+ QPHY_FLL_MAN_CODE,
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+};
+
+static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
+ [QPHY_FLL_CNTRL1] = 0xc4,
+ [QPHY_FLL_CNTRL2] = 0xc8,
+ [QPHY_FLL_CNT_VAL_L] = 0xcc,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+};
+
+static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_CNTRL1] = 0xc0,
+ [QPHY_FLL_CNTRL2] = 0xc4,
+ [QPHY_FLL_CNT_VAL_L] = 0xc8,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0xe),
+ QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
+ QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+<<<<<<<
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
+
+=======
+static const struct qmp_phy_init_tbl ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+	/* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+};
+
+static const struct qmp_phy_init_tbl ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
+
+>>>>>>>
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+ bool has_phy_com_ctrl;
+ /* true, if PHY has a reset for individual lanes */
+ bool has_lane_rst;
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* Register offset of secondary tx/rx lanes for USB DP combo PHY */
+ unsigned int tx_b_lane_offset;
+ unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has a separate SW_RESET register */
+ bool has_sw_rst;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ */
+struct qmp_phy {
+ struct phy *phy;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @serdes: iomapped memory space for phy's serdes
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @cfg: phy specific configuration
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ * @phy_initialized: indicate if PHY has been initialized
+ * @mode: current PHY mode
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control **resets;
+ struct regulator_bulk_data *vregs;
+
+ const struct qmp_phy_cfg *cfg;
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+ bool phy_initialized;
+ enum phy_mode mode;
+};
+
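+/*
+ * Read-modify-write helpers used throughout the driver to set and clear
+ * individual bits in the PHY register blocks.
+ */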
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+	/* read back to ensure the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+	/* read back to ensure the above write has gone through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref_clk", "ref_aux_clk",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+/* list of regulators */
+static const char * const msm8996_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+ .has_lane_rst = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+ .has_sw_rst = true,
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+ .rx_tbl = ipq8074_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+ .clk_list = NULL,
+ .num_clks = 0,
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+ .has_sw_rst = true,
+};
+
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ufsphy_serdes_tbl),
+ .tx_tbl = ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ufsphy_tx_tbl),
+ .rx_tbl = ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ufsphy_rx_tbl),
+ .pcs_tbl = ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
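+/*
+ * Program one PHY block (serdes/tx/rx/pcs) from an init table. Entries
+ * flagged with in_layout are written at the offset looked up in the
+ * per-PHY 'regs' layout; all others use their raw register offset.
+ */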
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
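+/*
+ * One-time initialization of the blocks shared by all lanes: regulators,
+ * resets, clocks, the serdes and (if present) the dp_com block. The
+ * init_count refcount under phy_mutex ensures only the first lane does
+ * this work.
+ */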
+static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret, i;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_reg_enable;
+ }
+
+ for (i = 0; i < cfg->num_resets; i++) {
+ ret = reset_control_assert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset assert failed\n",
+ cfg->reset_list[i]);
+ goto err_rst_assert;
+ }
+ }
+
+ for (i = cfg->num_resets - 1; i >= 0; i--) {
+ ret = reset_control_deassert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset deassert failed\n",
+ qmp->cfg->reset_list[i]);
+ goto err_rst;
+ }
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ goto err_rst;
+ }
+
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ }
+
+ /* Serdes configuration */
+ qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ goto err_com_init;
+ }
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_com_init:
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+err_rst:
+ while (++i < cfg->num_resets)
+ reset_control_assert(qmp->resets[i]);
+err_rst_assert:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_reg_enable:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
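+/*
+ * Counterpart of qcom_qmp_phy_com_init(): once the last lane is torn
+ * down, re-assert the resets and release the clocks and regulators.
+ */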
+static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int i = cfg->num_resets;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ if (cfg->has_sw_rst)
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ }
+
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+/* PHY Initialization */
+static int qcom_qmp_phy_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_com_init(qmp);
+ if (ret)
+ return ret;
+
+ if (cfg->has_lane_rst) {
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ goto err_lane_rst;
+ }
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ goto err_clk_enable;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+ /* Configuration for other LANE for USB-DP combo PHY */
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num);
+
+ qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ if (cfg->has_phy_dp_com_ctrl)
+ qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num);
+
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+	/*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * The power-down control is an active-low signal.
+	 */
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ if (cfg->has_sw_rst)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (cfg->has_phy_dp_com_ctrl)
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
+ qmp->phy_initialized = true;
+
+ return ret;
+
+err_pcs_ready:
+ clk_disable_unprepare(qphy->pipe_clk);
+err_clk_enable:
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+err_lane_rst:
+ qcom_qmp_phy_com_exit(qmp);
+
+ return ret;
+}
+
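+/* Reverse of qcom_qmp_phy_init(): stop and power down this lane. */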
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ if (cfg->has_sw_rst)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_com_exit(qmp);
+
+ qmp->phy_initialized = false;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
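+/*
+ * Autonomous mode keeps the PHY's receiver-detect and (for SuperSpeed
+ * modes) LFPS-detect interrupts armed so that wakeup events are still
+ * detected while the PHY is runtime suspended.
+ */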
+static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
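+/*
+ * Runtime PM is only implemented for the USB3 PHYs: suspend switches the
+ * PHY into autonomous mode and gates its clocks, resume restores the
+ * clocks and normal operation.
+ */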
+static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ /* Supported only for USB3 PHY */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->phy_initialized) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
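+/*
+ * The helpers below turn the name lists from the matched qmp_phy_cfg
+ * into devm-managed regulator, reset and clock handles.
+ */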
+static int qcom_qmp_phy_vreg_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_reset_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+
+ qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_resets; i++) {
+ struct reset_control *rst;
+ const char *name = qmp->cfg->reset_list[i];
+
+ rst = devm_reset_control_get(dev, name);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get %s reset\n", name);
+ return PTR_ERR(rst);
+ }
+ qmp->resets[i] = rst;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_clk_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp->cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY feeds the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is then requested by the
+ * PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ if ((qmp->cfg->type != PHY_TYPE_USB3) &&
+ (qmp->cfg->type != PHY_TYPE_PCIE)) {
+ /* not all phys register pipe clocks, so return success */
+ return 0;
+ }
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
+
+static const struct phy_ops qcom_qmp_phy_gen_ops = {
+ .init = qcom_qmp_phy_init,
+ .exit = qcom_qmp_phy_exit,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
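+/*
+ * Create one per-lane PHY: map the lane's tx/rx/pcs (and optional
+ * pcs_misc) regions, look up its pipe clock and lane reset where
+ * applicable, and create the corresponding generic phy.
+ */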
+static
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; and
+ * pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ qphy->pcs_misc = of_iomap(np, 3);
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+	/*
+	 * Get the PHY's pipe clock, if any. USB3 and PCIe are PIPE3-based
+	 * PHYs and always need a pipe clock, so a missing clock is an
+	 * error for those types. For all other PHY types the pipe clock
+	 * is optional and is left as NULL.
+	 */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (qmp->cfg->type == PHY_TYPE_PCIE ||
+ qmp->cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ /* Get lane reset, if any */
+ if (qmp->cfg->has_lane_rst) {
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-pcie-phy",
+ .data = &ipq8074_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+<<<<<<<
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+=======
+ .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
+ .data = &qmp_v3_usb3_uniphy_cfg,
+>>>>>>>
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
+ qcom_qmp_phy_runtime_resume, NULL)
+};
+
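+/*
+ * Probe maps the serdes (and optional dp_com) region, acquires the
+ * clocks, resets and regulators named in the matched config, creates a
+ * per-lane phy and fixed-rate pipe clock for each child node, and then
+ * registers the PHY provider.
+ */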
+static int qcom_qmp_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* per PHY serdes; usually located at base address */
+ qmp->serdes = base;
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (qmp->cfg->has_phy_dp_com_ctrl) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dp_com");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->dp_com = base;
+ }
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_clk_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_reset_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_vreg_init(dev);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > qmp->cfg->nlanes)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ id = 0;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+	/*
+	 * Prevent runtime PM from being enabled by default. Users can
+	 * enable it via the power/control attribute in sysfs.
+	 */
+ pm_runtime_forbid(dev);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_create(dev, child, id);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+		/*
+		 * Register the pipe clock provided by the phy.
+		 * See phy_pipe_clk_register() for details of this clock.
+		 */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_qmp_phy_driver = {
+ .probe = qcom_qmp_phy_probe,
+ .driver = {
+ .name = "qcom-qmp-phy",
+ .pm = &qcom_qmp_phy_pm_ops,
+ .of_match_table = qcom_qmp_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/73eed3cfc6daf7318b95fa21c8447b3b17437276/thisimage b/rr-cache/73eed3cfc6daf7318b95fa21c8447b3b17437276/thisimage
index 9cfcc6a..890f981 100644
--- a/rr-cache/73eed3cfc6daf7318b95fa21c8447b3b17437276/thisimage
+++ b/rr-cache/73eed3cfc6daf7318b95fa21c8447b3b17437276/thisimage
@@ -699,6 +699,16 @@
#clock-cells = <1>;
};
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
blsp1_spi0: spi@7575000 {
compatible = "qcom,spi-qup-v2.2.1";
reg = <0x07575000 0x600>;
@@ -904,6 +914,91 @@
#interrupt-cells = <4>;
};
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
mmcc: clock-controller@8c0000 {
compatible = "qcom,mmcc-msm8996";
#clock-cells = <1>;
@@ -1132,7 +1227,7 @@
#size-cells = <1>;
ranges;
- pcie0: qcom,pcie@600000 {
+ pcie0: pcie@600000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
status = "disabled";
power-domains = <&gcc PCIE0_GDSC>;
@@ -1169,7 +1264,6 @@
vdda-supply = <&pm8994_l28>;
- vdda-1p8-supply = <&pm8994_l12>;
linux,pci-domain = <0>;
@@ -1187,7 +1281,7 @@
};
- pcie1: qcom,pcie@608000 {
+ pcie1: pcie@608000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
power-domains = <&gcc PCIE1_GDSC>;
bus-range = <0x00 0xff>;
@@ -1226,7 +1320,6 @@
vdda-supply = <&pm8994_l28>;
- vdda-1p8-supply = <&pm8994_l12>;
linux,pci-domain = <1>;
clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
@@ -1242,7 +1335,7 @@
"bus_slave";
};
- pcie2: qcom,pcie@610000 {
+ pcie2: pcie@610000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
power-domains = <&gcc PCIE2_GDSC>;
bus-range = <0x00 0xff>;
@@ -1280,7 +1373,6 @@
pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep >;
vdda-supply = <&pm8994_l28>;
- vdda-1p8-supply = <&pm8994_l12>;
linux,pci-domain = <2>;
clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
@@ -1297,86 +1389,6 @@
};
};
- ufsphy1: ufsphy@627000 {
- compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
- reg = <0x627000 0xda8>;
- reg-names = "phy_mem";
- #phy-cells = <0>;
- vdda-phy-max-microamp = <18380>;
- vdda-pll-max-microamp = <9440>;
-
- vddp-ref-clk-supply = <&pm8994_l25>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
- clock-names = "ref_clk_src", "ref_clk";
- clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
- <&gcc GCC_UFS_CLKREF_CLK>;
- status = "disabled";
- power-domains = <&gcc UFS_GDSC>;
-
- };
-
- ufshc@624000 {
- compatible = "qcom,ufshc";
- reg = <0x624000 0x2500>;
- interrupts = <0 265 IRQ_TYPE_LEVEL_HIGH>;
-
- phys = <&ufsphy1>;
- phy-names = "ufsphy";
-
- vcc-supply = <&pm8994_l20>;
- vccq-supply = <&pm8994_l25>;
- vccq2-supply = <&pm8994_s4>;
-
- vcc-max-microamp = <600000>;
- vccq-max-microamp = <450000>;
- vccq2-max-microamp = <450000>;
-
- clock-names =
- "core_clk_src",
- "core_clk",
- "bus_clk",
- "bus_aggr_clk",
- "iface_clk",
- "core_clk_unipro_src",
- "core_clk_unipro",
- "core_clk_ice",
- "ref_clk",
- "tx_lane0_sync_clk",
- "rx_lane0_sync_clk";
- clocks =
- <&gcc UFS_AXI_CLK_SRC>,
- <&gcc GCC_UFS_AXI_CLK>,
- <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
- <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
- <&gcc GCC_UFS_AHB_CLK>,
- <&gcc UFS_ICE_CORE_CLK_SRC>,
- <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
- <&gcc GCC_UFS_ICE_CORE_CLK>,
- <&rpmcc RPM_SMD_LN_BB_CLK>,
- <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
- <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
- freq-table-hz =
- <100000000 200000000>,
- <0 0>,
- <0 0>,
- <0 0>,
- <0 0>,
- <150000000 300000000>,
- <0 0>,
- <0 0>,
- <0 0>,
- <0 0>,
- <0 0>;
-
- lanes-per-direction = <1>;
- status = "disabled";
-
- ufs_variant {
- compatible = "qcom,ufs_variant";
- };
- };
-
adreno_smmu: arm,smmu@b40000 {
compatible = "qcom,msm8996-smmu-v2";
reg = <0xb40000 0x10000>;
diff --git a/rr-cache/7801da766901361f856c697332a300f85bfc6d93/postimage b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/postimage
new file mode 100644
index 0000000..1eb77d8
--- /dev/null
+++ b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/postimage
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+#define MSM_FRONTEND_DAI_MULTIMEDIA7 6
+#define MSM_FRONTEND_DAI_MULTIMEDIA8 7
+#define MSM_FRONTEND_DAI_MULTIMEDIA9 8
+#define MSM_FRONTEND_DAI_MULTIMEDIA10 9
+#define MSM_FRONTEND_DAI_MULTIMEDIA11 10
+#define MSM_FRONTEND_DAI_MULTIMEDIA12 11
+#define MSM_FRONTEND_DAI_MULTIMEDIA13 12
+#define MSM_FRONTEND_DAI_MULTIMEDIA14 13
+#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
+#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+
+#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/rr-cache/7801da766901361f856c697332a300f85bfc6d93/preimage b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/preimage
new file mode 100644
index 0000000..8e4f7ba
--- /dev/null
+++ b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/preimage
@@ -0,0 +1,35 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+=======
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+>>>>>>>
+#define MSM_FRONTEND_DAI_MULTIMEDIA7 6
+#define MSM_FRONTEND_DAI_MULTIMEDIA8 7
+#define MSM_FRONTEND_DAI_MULTIMEDIA9 8
+#define MSM_FRONTEND_DAI_MULTIMEDIA10 9
+#define MSM_FRONTEND_DAI_MULTIMEDIA11 10
+#define MSM_FRONTEND_DAI_MULTIMEDIA12 11
+#define MSM_FRONTEND_DAI_MULTIMEDIA13 12
+#define MSM_FRONTEND_DAI_MULTIMEDIA14 13
+#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
+#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+
+#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/rr-cache/7801da766901361f856c697332a300f85bfc6d93/thisimage b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/thisimage
new file mode 100644
index 0000000..8e4f7ba
--- /dev/null
+++ b/rr-cache/7801da766901361f856c697332a300f85bfc6d93/thisimage
@@ -0,0 +1,35 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+=======
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __DT_BINDINGS_Q6_ASM_H__
+#define __DT_BINDINGS_Q6_ASM_H__
+
+#define MSM_FRONTEND_DAI_MULTIMEDIA1 0
+#define MSM_FRONTEND_DAI_MULTIMEDIA2 1
+#define MSM_FRONTEND_DAI_MULTIMEDIA3 2
+#define MSM_FRONTEND_DAI_MULTIMEDIA4 3
+#define MSM_FRONTEND_DAI_MULTIMEDIA5 4
+#define MSM_FRONTEND_DAI_MULTIMEDIA6 5
+>>>>>>>
+#define MSM_FRONTEND_DAI_MULTIMEDIA7 6
+#define MSM_FRONTEND_DAI_MULTIMEDIA8 7
+#define MSM_FRONTEND_DAI_MULTIMEDIA9 8
+#define MSM_FRONTEND_DAI_MULTIMEDIA10 9
+#define MSM_FRONTEND_DAI_MULTIMEDIA11 10
+#define MSM_FRONTEND_DAI_MULTIMEDIA12 11
+#define MSM_FRONTEND_DAI_MULTIMEDIA13 12
+#define MSM_FRONTEND_DAI_MULTIMEDIA14 13
+#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
+#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+
+#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/postimage b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/postimage
new file mode 100644
index 0000000..aa54e49
--- /dev/null
+++ b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/postimage
@@ -0,0 +1,109 @@
+* Qualcomm Technologies APQ8096 ASoC sound card driver
+
+This binding describes the APQ8096 sound card, which uses qdsp for audio.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apq8096-sndcard"
+
+- qcom,audio-routing:
+ Usage: Optional
+ Value type: <stringlist>
+ Definition: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+		  of the codec and the jacks on the board.
+		  Valid names include:
+
+ Board Connectors:
+ "Headphone Left"
+ "Headphone Right"
+ "Earphone"
+ "Line Out1"
+ "Line Out2"
+ "Line Out3"
+ "Line Out4"
+ "Analog Mic1"
+ "Analog Mic2"
+ "Analog Mic3"
+ "Analog Mic4"
+ "Analog Mic5"
+ "Analog Mic6"
+ "Digital Mic2"
+ "Digital Mic3"
+
+ Audio pins and MicBias on WCD9335 Codec:
+			"MIC_BIAS1"
+ "MIC_BIAS2"
+ "MIC_BIAS3"
+ "MIC_BIAS4"
+ "AMIC1"
+ "AMIC2"
+ "AMIC3"
+ "AMIC4"
+ "AMIC5"
+			"AMIC6"
+ "DMIC1"
+ "DMIC2"
+ "DMIC3"
+= dailinks
+Each subnode of sndcard represents a dailink, and subnodes of each
+dailink would be cpu/codec/platform dais.
+
+- link-name:
+ Usage: required
+ Value type: <string>
+ Definition: User friendly name for dai link
+
+= CPU, PLATFORM, CODEC dais subnodes
+- cpu:
+ Usage: required
+ Value type: <subnode>
+ Definition: cpu dai sub-node
+
+- codec:
+ Usage: Optional
+ Value type: <subnode>
+ Definition: codec dai sub-node
+
+- platform:
+ Usage: Optional
+ Value type: <subnode>
+ Definition: platform dai sub-node
+
+- sound-dai:
+ Usage: required
+ Value type: <phandle with arguments>
+ Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+
+Example:
+
+audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+};
diff --git a/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/preimage b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/preimage
new file mode 100644
index 0000000..0afa83b
--- /dev/null
+++ b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/preimage
@@ -0,0 +1,159 @@
+* Qualcomm Technologies APQ8096 ASoC sound card driver
+
+This binding describes the APQ8096 sound card, which uses qdsp for audio.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apq8096-sndcard"
+
+- qcom,audio-routing:
+ Usage: Optional
+ Value type: <stringlist>
+ Definition: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of codec and the jacks on the board:
+<<<<<<<
+ Valid names include:
+
+ Board Connectors:
+ "Headphone Left"
+ "Headphone Right"
+ "Earphone"
+ "Line Out1"
+ "Line Out2"
+ "Line Out3"
+ "Line Out4"
+ "Analog Mic1"
+ "Analog Mic2"
+ "Analog Mic3"
+ "Analog Mic4"
+ "Analog Mic5"
+ "Analog Mic6"
+ "Digital Mic2"
+ "Digital Mic3"
+
+ Audio pins and MicBias on WCD9335 Codec:
+ "MIC_BIAS1
+ "MIC_BIAS2"
+ "MIC_BIAS3"
+ "MIC_BIAS4"
+ "AMIC1"
+ "AMIC2"
+ "AMIC3"
+ "AMIC4"
+ "AMIC5"
+ "AMIC6"
+ "AMIC6"
+ "DMIC1"
+ "DMIC2"
+ "DMIC3"
+= dailinks
+Each subnode of sndcard represents either a dailink, and subnodes of each
+dailinks would be cpu/codec/platform dais.
+=======
+
+= FRONTEND and BACKEND dailinks
+Each subnode of sndcard represents either frontend or backend dailink,
+and subnodes of each backend/frontend dailinks would be
+cpu/codec/platform dais.
+>>>>>>>
+
+- link-name:
+ Usage: required
+ Value type: <string>
+ Definition: User friendly name for dai link
+
+<<<<<<<
+=======
+- is-fe:
+ Usage: optional
+ Value type: <bool>
+ Definition: present if the dailink is frontend
+
+
+>>>>>>>
+= CPU, PLATFORM, CODEC dais subnodes
+- cpu:
+ Usage: required
+ Value type: <subnode>
+ Definition: cpu dai sub-node
+
+- codec:
+<<<<<<<
+ Usage: Optional
+=======
+ Usage: required
+>>>>>>>
+ Value type: <subnode>
+ Definition: codec dai sub-node
+
+- platform:
+<<<<<<<
+ Usage: Optional
+=======
+ Usage: opional
+>>>>>>>
+ Value type: <subnode>
+ Definition: platform dai sub-node
+
+- sound-dai:
+ Usage: required
+<<<<<<<
+ Value type: <phandle with arguments>
+=======
+ Value type: <phandle>
+>>>>>>>
+ Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+
+Example:
+
+audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+<<<<<<<
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fedai1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ bedai1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+=======
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe HDMI_RX>;
+>>>>>>>
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+};
diff --git a/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/thisimage b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/thisimage
new file mode 100644
index 0000000..0afa83b
--- /dev/null
+++ b/rr-cache/8a4b54fff5a9c114500b6d66e8d0eeb044dae269/thisimage
@@ -0,0 +1,159 @@
+* Qualcomm Technologies APQ8096 ASoC sound card driver
+
+This binding describes the APQ8096 sound card, which uses qdsp for audio.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,apq8096-sndcard"
+
+- qcom,audio-routing:
+ Usage: Optional
+ Value type: <stringlist>
+ Definition: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of codec and the jacks on the board:
+<<<<<<<
+ Valid names include:
+
+ Board Connectors:
+ "Headphone Left"
+ "Headphone Right"
+ "Earphone"
+ "Line Out1"
+ "Line Out2"
+ "Line Out3"
+ "Line Out4"
+ "Analog Mic1"
+ "Analog Mic2"
+ "Analog Mic3"
+ "Analog Mic4"
+ "Analog Mic5"
+ "Analog Mic6"
+ "Digital Mic2"
+ "Digital Mic3"
+
+ Audio pins and MicBias on WCD9335 Codec:
+ "MIC_BIAS1
+ "MIC_BIAS2"
+ "MIC_BIAS3"
+ "MIC_BIAS4"
+ "AMIC1"
+ "AMIC2"
+ "AMIC3"
+ "AMIC4"
+ "AMIC5"
+ "AMIC6"
+ "AMIC6"
+ "DMIC1"
+ "DMIC2"
+ "DMIC3"
+= dailinks
+Each subnode of sndcard represents either a dailink, and subnodes of each
+dailinks would be cpu/codec/platform dais.
+=======
+
+= FRONTEND and BACKEND dailinks
+Each subnode of sndcard represents either frontend or backend dailink,
+and subnodes of each backend/frontend dailinks would be
+cpu/codec/platform dais.
+>>>>>>>
+
+- link-name:
+ Usage: required
+ Value type: <string>
+ Definition: User friendly name for dai link
+
+<<<<<<<
+=======
+- is-fe:
+ Usage: optional
+ Value type: <bool>
+ Definition: present if the dailink is frontend
+
+
+>>>>>>>
+= CPU, PLATFORM, CODEC dais subnodes
+- cpu:
+ Usage: required
+ Value type: <subnode>
+ Definition: cpu dai sub-node
+
+- codec:
+<<<<<<<
+ Usage: Optional
+=======
+ Usage: required
+>>>>>>>
+ Value type: <subnode>
+ Definition: codec dai sub-node
+
+- platform:
+<<<<<<<
+ Usage: Optional
+=======
+ Usage: opional
+>>>>>>>
+ Value type: <subnode>
+ Definition: platform dai sub-node
+
+- sound-dai:
+ Usage: required
+<<<<<<<
+ Value type: <phandle with arguments>
+=======
+ Value type: <phandle>
+>>>>>>>
+ Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+
+Example:
+
+audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+<<<<<<<
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fedai1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ bedai1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+=======
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe HDMI_RX>;
+>>>>>>>
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+};
diff --git a/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/postimage.1 b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/postimage.1
new file mode 100644
index 0000000..7c50459
--- /dev/null
+++ b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/postimage.1
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+ ls_exp_gpio_f: pm8994_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ output-low;
+ power-source = <2>; // PM8994_GPIO_S4, 1.8V
+ };
+ };
+
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ bias-pull-down;
+ };
+ };
+
+ wlan_en_gpios: wlan_en_gpios {
+ pinconf {
+ pins = "gpio8";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ bias-pull-down;
+ };
+ };
+
+
+ audio_mclk: clk_div1 {
+ pinconf {
+ pins = "gpio15";
+ function = "func1";
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ volume_up_gpio: pm8996_gpio2 {
+ pinconf {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ drive-push-pull;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+
+ bias-disable;
+ power-source = <PM8994_GPIO_S4>;
+ };
+ };
+
+ usb3_vbus_det_gpio: pm8996_gpio22 {
+ pinconf {
+ pins = "gpio22";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
+
+&pmi8994_gpios {
+ usb2_vbus_det_gpio: pmi8996_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
diff --git a/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage
new file mode 100644
index 0000000..ad41b2e
--- /dev/null
+++ b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+ ls_exp_gpio_f: pm8994_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ output-low;
+ power-source = <2>; // PM8994_GPIO_S4, 1.8V
+ };
+ };
+
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+ wlan_en_gpios: wlan_en_gpios {
+ pinconf {
+ pins = "gpio8";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+<<<<<<<
+=======
+
+ audio_mclk: clk_div1 {
+ pinconf {
+ pins = "gpio15";
+ function = "func1";
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+>>>>>>>
+ volume_up_gpio: pm8996_gpio2 {
+ pinconf {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ drive-push-pull;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+
+ bias-disable;
+ power-source = <PM8994_GPIO_S4>;
+ };
+ };
+
+ usb3_vbus_det_gpio: pm8996_gpio22 {
+ pinconf {
+ pins = "gpio22";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
+
+&pmi8994_gpios {
+ usb2_vbus_det_gpio: pmi8996_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
diff --git a/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage.1 b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage.1
new file mode 100644
index 0000000..ad41b2e
--- /dev/null
+++ b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/preimage.1
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+ ls_exp_gpio_f: pm8994_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ output-low;
+ power-source = <2>; // PM8994_GPIO_S4, 1.8V
+ };
+ };
+
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+ wlan_en_gpios: wlan_en_gpios {
+ pinconf {
+ pins = "gpio8";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+<<<<<<<
+=======
+
+ audio_mclk: clk_div1 {
+ pinconf {
+ pins = "gpio15";
+ function = "func1";
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+>>>>>>>
+ volume_up_gpio: pm8996_gpio2 {
+ pinconf {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ drive-push-pull;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+
+ bias-disable;
+ power-source = <PM8994_GPIO_S4>;
+ };
+ };
+
+ usb3_vbus_det_gpio: pm8996_gpio22 {
+ pinconf {
+ pins = "gpio22";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
+
+&pmi8994_gpios {
+ usb2_vbus_det_gpio: pmi8996_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
diff --git a/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/thisimage.1 b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/thisimage.1
new file mode 100644
index 0000000..ad41b2e
--- /dev/null
+++ b/rr-cache/8e0943aa3bcebd1a9dad662390dc8d5358d6c9a3/thisimage.1
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+ ls_exp_gpio_f: pm8994_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ output-low;
+ power-source = <2>; // PM8994_GPIO_S4, 1.8V
+ };
+ };
+
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+ wlan_en_gpios: wlan_en_gpios {
+ pinconf {
+ pins = "gpio8";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+<<<<<<<
+=======
+
+ audio_mclk: clk_div1 {
+ pinconf {
+ pins = "gpio15";
+ function = "func1";
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+>>>>>>>
+ volume_up_gpio: pm8996_gpio2 {
+ pinconf {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ drive-push-pull;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+
+ bias-disable;
+ power-source = <PM8994_GPIO_S4>;
+ };
+ };
+
+ usb3_vbus_det_gpio: pm8996_gpio22 {
+ pinconf {
+ pins = "gpio22";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
+
+&pmi8994_gpios {
+ usb2_vbus_det_gpio: pmi8996_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
diff --git a/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/postimage b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/postimage
new file mode 100644
index 0000000..87838fa
--- /dev/null
+++ b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/postimage
@@ -0,0 +1,92 @@
+config SND_SOC_QCOM
+ tristate "ASoC support for QCOM platforms"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say Y or M if you want to add support to use audio devices
+ in Qualcomm Technologies SOC-based platforms.
+
+config SND_SOC_LPASS_CPU
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_PLATFORM
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_IPQ806X
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_LPASS_APQ8016
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_STORM
+ tristate "ASoC I2S support for Storm boards"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_IPQ806X
+ select SND_SOC_MAX98357A
+ help
+	  Say Y or M if you want to add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
+
+config SND_SOC_APQ8016_SBC
+ tristate "SoC Audio support for APQ8016 SBC platforms"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_APQ8016
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
+
+config SND_SOC_QDSP6_COMMON
+ tristate
+
+config SND_SOC_QDSP6_CORE
+ tristate
+
+config SND_SOC_QDSP6_AFE
+ tristate
+
+config SND_SOC_QDSP6_AFE_DAI
+ tristate
+
+config SND_SOC_QDSP6_ADM
+ tristate
+
+config SND_SOC_QDSP6_ROUTING
+ tristate
+
+config SND_SOC_QDSP6_ASM
+ tristate
+
+config SND_SOC_QDSP6_ASM_DAI
+ tristate
+
+config SND_SOC_QDSP6
+ tristate "SoC ALSA audio driver for QDSP6"
+ depends on QCOM_APR && HAS_DMA
+ select SND_SOC_QDSP6_COMMON
+ select SND_SOC_QDSP6_CORE
+ select SND_SOC_QDSP6_AFE
+ select SND_SOC_QDSP6_AFE_DAI
+ select SND_SOC_QDSP6_ADM
+ select SND_SOC_QDSP6_ROUTING
+ select SND_SOC_QDSP6_ASM
+ select SND_SOC_QDSP6_ASM_DAI
+ help
+	  To add support for MSM QDSP6 SoC audio.
+	  This will enable sound SoC platform-specific
+	  audio drivers, including the q6asm, q6adm and
+	  q6afe interfaces to the DSP using APR.
+
+config SND_SOC_MSM8996
+ tristate "SoC Machine driver for MSM8996 and APQ8096 boards"
+ depends on QCOM_APR
+ select SND_SOC_QDSP6
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+	  Say Y if you want to use audio devices on these SoCs.
diff --git a/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/preimage b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/preimage
new file mode 100644
index 0000000..ffc682b
--- /dev/null
+++ b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/preimage
@@ -0,0 +1,134 @@
+config SND_SOC_QCOM
+ tristate "ASoC support for QCOM platforms"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say Y or M if you want to add support to use audio devices
+ in Qualcomm Technologies SOC-based platforms.
+
+config SND_SOC_LPASS_CPU
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_PLATFORM
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_IPQ806X
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_LPASS_APQ8016
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_STORM
+ tristate "ASoC I2S support for Storm boards"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_IPQ806X
+ select SND_SOC_MAX98357A
+ help
+ Say Y or M if you want add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
+
+config SND_SOC_APQ8016_SBC
+ tristate "SoC Audio support for APQ8016 SBC platforms"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_APQ8016
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
+
+config SND_SOC_QDSP6_COMMON
+ tristate
+<<<<<<<
+ default n
+
+config SND_SOC_QDSP6_AFE
+ tristate
+ default n
+
+config SND_SOC_QDSP6_ADM
+ tristate
+ default n
+
+config SND_SOC_QDSP6_ASM
+ tristate
+ default n
+
+config SND_SOC_QDSP6_CORE
+ tristate
+ default n
+=======
+
+config SND_SOC_QDSP6_CORE
+ tristate
+
+config SND_SOC_QDSP6_AFE
+ tristate
+
+config SND_SOC_QDSP6_AFE_DAI
+ tristate
+
+config SND_SOC_QDSP6_ADM
+ tristate
+
+config SND_SOC_QDSP6_ROUTING
+ tristate
+
+config SND_SOC_QDSP6_ASM
+ tristate
+
+config SND_SOC_QDSP6_ASM_DAI
+ tristate
+>>>>>>>
+
+config SND_SOC_QDSP6
+ tristate "SoC ALSA audio driver for QDSP6"
+ depends on QCOM_APR && HAS_DMA
+ select SND_SOC_QDSP6_COMMON
+<<<<<<<
+ select SND_SOC_QDSP6_AFE
+ select SND_SOC_QDSP6_ADM
+ select SND_SOC_QDSP6_ASM
+ select SND_SOC_QDSP6_CORE
+=======
+ select SND_SOC_QDSP6_CORE
+ select SND_SOC_QDSP6_AFE
+ select SND_SOC_QDSP6_AFE_DAI
+ select SND_SOC_QDSP6_ADM
+ select SND_SOC_QDSP6_ROUTING
+ select SND_SOC_QDSP6_ASM
+ select SND_SOC_QDSP6_ASM_DAI
+>>>>>>>
+ help
+ To add support for MSM QDSP6 Soc Audio.
+ This will enable sound soc platform specific
+ audio drivers. This includes q6asm, q6adm,
+ q6afe interfaces to DSP using apr.
+
+config SND_SOC_MSM8996
+ tristate "SoC Machine driver for MSM8996 and APQ8096 boards"
+ depends on QCOM_APR
+<<<<<<<
+ select SND_SOC_QDSP6
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+ Say Y if you want to use audio device on this SoCs
+=======
+ select SND_SOC_QDSP6V2
+ default n
+ help
+ To add support for SoC audio on MSM8996 and APQ8096 boards
+
+config SND_SOC_APQ8064
+ tristate "SoC Machine driver for APQ8064 based boards"
+ depends on QCOM_APR
+ select SND_SOC_QDSP6
+ default n
+ help
+ To add support for SoC audio on APQ8064 based boards
+>>>>>>>
diff --git a/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/thisimage b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/thisimage
new file mode 100644
index 0000000..ffc682b
--- /dev/null
+++ b/rr-cache/96f5f65a10705f9495fa7195eaf40e288db44217/thisimage
@@ -0,0 +1,134 @@
+config SND_SOC_QCOM
+ tristate "ASoC support for QCOM platforms"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say Y or M if you want to add support to use audio devices
+ in Qualcomm Technologies SOC-based platforms.
+
+config SND_SOC_LPASS_CPU
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_PLATFORM
+ tristate
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_IPQ806X
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_LPASS_APQ8016
+ tristate
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+
+config SND_SOC_STORM
+ tristate "ASoC I2S support for Storm boards"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_IPQ806X
+ select SND_SOC_MAX98357A
+ help
+ Say Y or M if you want add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
+
+config SND_SOC_APQ8016_SBC
+ tristate "SoC Audio support for APQ8016 SBC platforms"
+ depends on SND_SOC_QCOM
+ select SND_SOC_LPASS_APQ8016
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
+
+config SND_SOC_QDSP6_COMMON
+ tristate
+<<<<<<<
+ default n
+
+config SND_SOC_QDSP6_AFE
+ tristate
+ default n
+
+config SND_SOC_QDSP6_ADM
+ tristate
+ default n
+
+config SND_SOC_QDSP6_ASM
+ tristate
+ default n
+
+config SND_SOC_QDSP6_CORE
+ tristate
+ default n
+=======
+
+config SND_SOC_QDSP6_CORE
+ tristate
+
+config SND_SOC_QDSP6_AFE
+ tristate
+
+config SND_SOC_QDSP6_AFE_DAI
+ tristate
+
+config SND_SOC_QDSP6_ADM
+ tristate
+
+config SND_SOC_QDSP6_ROUTING
+ tristate
+
+config SND_SOC_QDSP6_ASM
+ tristate
+
+config SND_SOC_QDSP6_ASM_DAI
+ tristate
+>>>>>>>
+
+config SND_SOC_QDSP6
+ tristate "SoC ALSA audio driver for QDSP6"
+ depends on QCOM_APR && HAS_DMA
+ select SND_SOC_QDSP6_COMMON
+<<<<<<<
+ select SND_SOC_QDSP6_AFE
+ select SND_SOC_QDSP6_ADM
+ select SND_SOC_QDSP6_ASM
+ select SND_SOC_QDSP6_CORE
+=======
+ select SND_SOC_QDSP6_CORE
+ select SND_SOC_QDSP6_AFE
+ select SND_SOC_QDSP6_AFE_DAI
+ select SND_SOC_QDSP6_ADM
+ select SND_SOC_QDSP6_ROUTING
+ select SND_SOC_QDSP6_ASM
+ select SND_SOC_QDSP6_ASM_DAI
+>>>>>>>
+ help
+ To add support for MSM QDSP6 Soc Audio.
+ This will enable sound soc platform specific
+ audio drivers. This includes q6asm, q6adm,
+ q6afe interfaces to DSP using apr.
+
+config SND_SOC_MSM8996
+ tristate "SoC Machine driver for MSM8996 and APQ8096 boards"
+ depends on QCOM_APR
+<<<<<<<
+ select SND_SOC_QDSP6
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+ Say Y if you want to use audio device on this SoCs
+=======
+ select SND_SOC_QDSP6V2
+ default n
+ help
+ To add support for SoC audio on MSM8996 and APQ8096 boards
+
+config SND_SOC_APQ8064
+ tristate "SoC Machine driver for APQ8064 based boards"
+ depends on QCOM_APR
+ select SND_SOC_QDSP6
+ default n
+ help
+ To add support for SoC audio on APQ8064 based boards
+>>>>>>>
diff --git a/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/postimage b/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/postimage
new file mode 100644
index 0000000..fa5e702
--- /dev/null
+++ b/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/postimage
@@ -0,0 +1,1422 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ zap_shader_region: gpu@8f200000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x8f200000 0 0x2300000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@75ba000{
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+ pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 334 0>,
+ <0 329 0>,
+ <0 330 0>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "okay";
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 0>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+ /* This is a safe speed for bring up in all bin levels.
+ * This isn't the fastest the chip can go, but we can
+ * get there eventually */
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <510000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/preimage b/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/preimage
new file mode 100644
index 0000000..25de002
--- /dev/null
+++ b/rr-cache/9d52687238be7967036fe7e752f5c0259d75823d/preimage
@@ -0,0 +1,1505 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ zap_shader_region: gpu@8f200000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x8f200000 0 0x2300000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart1: serial@7570000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07570000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@75ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ ufsphy: phy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&ufsphy>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ power-domains = <&gcc UFS_GDSC>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi", "config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+ pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+
+<<<<<<<
+=======
+ ufsphy1: ufsphy@627000 {
+ compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ status = "disabled";
+ power-domains = <&gcc UFS_GDSC>;
+
+ };
+
+ ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <0 265 0>;
+
+ phys = <&ufsphy1>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 334 0>,
+ <0 329 0>,
+ <0 330 0>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "okay";
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 0>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+ /* This is a safe speed for bring up in all bin levels.
+ * This isn't the fastest the chip can go, but we can
+ * get there eventually */
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <510000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+>>>>>>>
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
diff --git a/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/postimage b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/postimage
new file mode 100644
index 0000000..349c6a8
--- /dev/null
+++ b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/postimage
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/component.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include "q6asm.h"
+#include "q6routing.h"
+#include "q6dsp-errno.h"
+
+#define DRV_NAME "q6asm-fe-dai"
+
+#define PLAYBACK_MIN_NUM_PERIODS 2
+#define PLAYBACK_MAX_NUM_PERIODS 8
+#define PLAYBACK_MAX_PERIOD_SIZE 65536
+#define PLAYBACK_MIN_PERIOD_SIZE 128
+#define CAPTURE_MIN_NUM_PERIODS 2
+#define CAPTURE_MAX_NUM_PERIODS 8
+#define CAPTURE_MAX_PERIOD_SIZE 4096
+#define CAPTURE_MIN_PERIOD_SIZE 320
+#define SID_MASK_DEFAULT 0xF
+
+enum stream_state {
+ Q6ASM_STREAM_IDLE = 0,
+ Q6ASM_STREAM_STOPPED,
+ Q6ASM_STREAM_RUNNING,
+};
+
+struct q6asm_dai_rtd {
+ struct snd_pcm_substream *substream;
+ phys_addr_t phys;
+ unsigned int pcm_size;
+ unsigned int pcm_count;
+ unsigned int pcm_irq_pos; /* IRQ position */
+ unsigned int periods;
+ uint16_t bits_per_sample;
+ uint16_t source; /* Encoding source bit mask */
+ struct audio_client *audio_client;
+ uint16_t session_id;
+ enum stream_state state;
+};
+
+struct q6asm_dai_data {
+ long long int sid;
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_capture = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 4,
+ .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
+ CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = (PLAYBACK_MAX_NUM_PERIODS *
+ PLAYBACK_MAX_PERIOD_SIZE),
+ .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
+ .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE,
+ .periods_min = PLAYBACK_MIN_NUM_PERIODS,
+ .periods_max = PLAYBACK_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+#define Q6ASM_FEDAI_DRIVER(num) { \
+ .playback = { \
+ .stream_name = "MultiMedia"#num" Playback", \
+ .rates = (SNDRV_PCM_RATE_8000_192000 | \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 192000, \
+ }, \
+ .capture = { \
+ .stream_name = "MultiMedia"#num" Capture", \
+ .rates = (SNDRV_PCM_RATE_8000_48000 | \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 4, \
+ .rate_min = 8000, \
+ .rate_max = 48000, \
+ }, \
+ .name = "MultiMedia"#num, \
+ .probe = fe_dai_probe, \
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
+ }
+
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+ 88200, 96000, 176400, 192000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+ .count = ARRAY_SIZE(supported_sample_rates),
+ .list = supported_sample_rates,
+ .mask = 0,
+};
+
+static void event_handler(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv)
+{
+ struct q6asm_dai_rtd *prtd = priv;
+ struct snd_pcm_substream *substream = prtd->substream;
+
+ switch (opcode) {
+ case ASM_CLIENT_EVENT_CMD_RUN_DONE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ break;
+ case ASM_CLIENT_EVENT_CMD_EOS_DONE:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ break;
+ case ASM_CLIENT_EVENT_DATA_WRITE_DONE: {
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+
+ break;
+ }
+ case ASM_CLIENT_EVENT_DATA_READ_DONE:
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_read(prtd->audio_client);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_data *pdata;
+ int ret, i;
+
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata)
+ return -EINVAL;
+
+ if (!prtd || !prtd->audio_client) {
+ pr_err("%s: private data null or audio client freed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_irq_pos = 0;
+ /* rate and channels are sent to audio driver */
+ if (prtd->state) {
+ /* clear the previous setup if any */
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ }
+
+ ret = q6asm_map_memory_regions(substream->stream, prtd->audio_client,
+ prtd->phys,
+ (prtd->pcm_size / prtd->periods),
+ prtd->periods);
+
+ if (ret < 0) {
+ pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+ ret);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+ }
+
+ if (ret < 0) {
+ pr_err("%s: q6asm_open_write failed\n", __func__);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+
+ prtd->session_id = q6asm_get_session_id(prtd->audio_client);
+ ret = q6routing_stream_open(soc_prtd->dai_link->id, LEGACY_PCM_MODE,
+ prtd->session_id, substream->stream);
+ if (ret) {
+ pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+ return ret;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_media_format_block_multi_ch_pcm(
+ prtd->audio_client, runtime->rate,
+ runtime->channels, NULL,
+ prtd->bits_per_sample);
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_enc_cfg_blk_pcm_format_support(prtd->audio_client,
+ runtime->rate, runtime->channels,
+ prtd->bits_per_sample);
+
+ /* Queue the buffers */
+ for (i = 0; i < runtime->periods; i++)
+ q6asm_read(prtd->audio_client);
+
+ }
+ if (ret < 0)
+ pr_info("%s: CMD Format block failed\n", __func__);
+
+ prtd->state = Q6ASM_STREAM_RUNNING;
+
+ return 0;
+}
+
+static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int q6asm_dai_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = c->dev;
+ int ret = 0;
+ int stream_id;
+
+ stream_id = cpu_dai->driver->id;
+
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata) {
+ pr_err("Drv data not found ..\n");
+ return -EINVAL;
+ }
+
+ prtd = kzalloc(sizeof(struct q6asm_dai_rtd), GFP_KERNEL);
+ if (prtd == NULL)
+ return -ENOMEM;
+
+ prtd->substream = substream;
+ prtd->audio_client = q6asm_audio_client_alloc(dev,
+ (q6asm_cb)event_handler, prtd, stream_id,
+ LEGACY_PCM_MODE);
+ if (!prtd->audio_client) {
+ pr_info("%s: Could not allocate memory\n", __func__);
+ kfree(prtd);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = q6asm_dai_hardware_playback;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw = q6asm_dai_hardware_capture;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_sample_rates);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_list failed\n");
+ /* Ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE,
+ PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes min max ret = %d\n",
+ ret);
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for period bytes step ret = %d\n",
+ ret);
+ }
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes step ret = %d\n",
+ ret);
+ }
+
+ runtime->private_data = prtd;
+
+ snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_playback);
+
+ runtime->dma_bytes = q6asm_dai_hardware_playback.buffer_bytes_max;
+
+
+ if (pdata->sid < 0)
+ prtd->phys = substream->dma_buffer.addr;
+ else
+ prtd->phys = substream->dma_buffer.addr | (pdata->sid << 32);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int q6asm_dai_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->audio_client) {
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ }
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ kfree(prtd);
+ return 0;
+}
+
+static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->pcm_irq_pos >= prtd->pcm_size)
+ prtd->pcm_irq_pos = 0;
+
+ return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int q6asm_dai_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct device *dev = c->dev;
+
+ return dma_mmap_coherent(dev, vma,
+ runtime->dma_area, runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ prtd->pcm_size = params_buffer_bytes(params);
+ prtd->periods = params_periods(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ prtd->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ prtd->bits_per_sample = 24;
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_pcm_ops q6asm_dai_ops = {
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .mmap = q6asm_dai_mmap,
+};
+
+static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm_substream *psubstream, *csubstream;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_pcm *pcm = rtd->pcm;
+ struct device *dev;
+ int size, ret;
+
+ dev = c->dev;
+ size = q6asm_dai_hardware_playback.buffer_bytes_max;
+ psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (psubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &psubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+ }
+
+ csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (csubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &csubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
+ }
+ }
+
+	return 0;
+}
+
+static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
+ substream = pcm->streams[i].substream;
+ if (substream) {
+ snd_dma_free_pages(&substream->dma_buffer);
+ substream->dma_buffer.area = NULL;
+ substream->dma_buffer.addr = 0;
+ }
+ }
+}
+
+static const struct snd_soc_dapm_route afe_pcm_routes[] = {
+ {"MM_DL1", NULL, "MultiMedia1 Playback" },
+ {"MM_DL2", NULL, "MultiMedia2 Playback" },
+ {"MM_DL3", NULL, "MultiMedia3 Playback" },
+ {"MM_DL4", NULL, "MultiMedia4 Playback" },
+ {"MM_DL5", NULL, "MultiMedia5 Playback" },
+ {"MM_DL6", NULL, "MultiMedia6 Playback" },
+ {"MM_DL7", NULL, "MultiMedia7 Playback" },
+	{"MM_DL8", NULL, "MultiMedia8 Playback" },
+ {"MultiMedia1 Capture", NULL, "MM_UL1"},
+ {"MultiMedia2 Capture", NULL, "MM_UL2"},
+ {"MultiMedia3 Capture", NULL, "MM_UL3"},
+ {"MultiMedia4 Capture", NULL, "MM_UL4"},
+ {"MultiMedia5 Capture", NULL, "MM_UL5"},
+ {"MultiMedia6 Capture", NULL, "MM_UL6"},
+ {"MultiMedia7 Capture", NULL, "MM_UL7"},
+ {"MultiMedia8 Capture", NULL, "MM_UL8"},
+
+};
+
+static int fe_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_context *dapm;
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+ snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
+ ARRAY_SIZE(afe_pcm_routes));
+
+ return 0;
+}
+
+
+static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ .name = DRV_NAME,
+ .ops = &q6asm_dai_ops,
+ .pcm_new = q6asm_dai_pcm_new,
+ .pcm_free = q6asm_dai_pcm_free,
+
+};
+
+static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+ Q6ASM_FEDAI_DRIVER(1),
+ Q6ASM_FEDAI_DRIVER(2),
+ Q6ASM_FEDAI_DRIVER(3),
+ Q6ASM_FEDAI_DRIVER(4),
+ Q6ASM_FEDAI_DRIVER(5),
+ Q6ASM_FEDAI_DRIVER(6),
+ Q6ASM_FEDAI_DRIVER(7),
+ Q6ASM_FEDAI_DRIVER(8),
+};
+
+static int q6asm_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct q6asm_dai_data *pdata;
+ int rc;
+
+ pdata = kzalloc(sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ rc = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
+ if (rc < 0)
+ pdata->sid = -1;
+ else
+ pdata->sid = args.args[0] & SID_MASK_DEFAULT;
+
+ dev_set_drvdata(dev, pdata);
+
+ return snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ q6asm_fe_dais,
+ ARRAY_SIZE(q6asm_fe_dais));
+}
+static void q6asm_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6asm_dai_data *pdata = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(pdata);
+
+}
+
+static const struct component_ops q6asm_dai_comp_ops = {
+ .bind = q6asm_dai_bind,
+ .unbind = q6asm_dai_unbind,
+};
+
+static int q6asm_dai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6asm_dai_comp_ops);
+}
+
+static int q6asm_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6asm_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6asm_dai_platform_driver = {
+ .driver = {
+ .name = "q6asm-dai",
+ },
+ .probe = q6asm_dai_probe,
+ .remove = q6asm_dai_dev_remove,
+};
+module_platform_driver(q6asm_dai_platform_driver);
+
+MODULE_DESCRIPTION("Q6ASM dai driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/preimage b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/preimage
new file mode 100644
index 0000000..a8d6c6f
--- /dev/null
+++ b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/preimage
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation
+ * Copyright (c) 2017, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+<<<<<<<
+=======
+#include <linux/component.h>
+#include <sound/soc.h>
+>>>>>>>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include "q6asm.h"
+#include "q6routing.h"
+#include "q6dsp-errno.h"
+
+<<<<<<<
+=======
+#define DRV_NAME "q6asm-fe-dai"
+
+>>>>>>>
+#define PLAYBACK_MIN_NUM_PERIODS 2
+#define PLAYBACK_MAX_NUM_PERIODS 8
+#define PLAYBACK_MAX_PERIOD_SIZE 65536
+#define PLAYBACK_MIN_PERIOD_SIZE 128
+#define CAPTURE_MIN_NUM_PERIODS 2
+#define CAPTURE_MAX_NUM_PERIODS 8
+#define CAPTURE_MAX_PERIOD_SIZE 4096
+#define CAPTURE_MIN_PERIOD_SIZE 320
+<<<<<<<
+=======
+#define SID_MASK_DEFAULT 0xF
+>>>>>>>
+
+enum stream_state {
+ Q6ASM_STREAM_IDLE = 0,
+ Q6ASM_STREAM_STOPPED,
+ Q6ASM_STREAM_RUNNING,
+};
+
+struct q6asm_dai_rtd {
+ struct snd_pcm_substream *substream;
+ phys_addr_t phys;
+ unsigned int pcm_size;
+ unsigned int pcm_count;
+ unsigned int pcm_irq_pos; /* IRQ position */
+ unsigned int periods;
+ uint16_t bits_per_sample;
+ uint16_t source; /* Encoding source bit mask */
+ struct audio_client *audio_client;
+ uint16_t session_id;
+ enum stream_state state;
+};
+
+struct q6asm_dai_data {
+ long long int sid;
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_capture = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 4,
+ .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
+ CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = (PLAYBACK_MAX_NUM_PERIODS *
+ PLAYBACK_MAX_PERIOD_SIZE),
+ .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
+ .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE,
+ .periods_min = PLAYBACK_MIN_NUM_PERIODS,
+ .periods_max = PLAYBACK_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+<<<<<<<
+=======
+#define Q6ASM_FEDAI_DRIVER(num) { \
+ .playback = { \
+ .stream_name = "MultiMedia"#num" Playback", \
+ .rates = (SNDRV_PCM_RATE_8000_192000| \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 192000, \
+ }, \
+ .capture = { \
+ .stream_name = "MultiMedia"#num" Capture", \
+ .rates = (SNDRV_PCM_RATE_8000_48000| \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 4, \
+ .rate_min = 8000, \
+ .rate_max = 48000, \
+ }, \
+ .name = "MultiMedia"#num, \
+ .probe = fe_dai_probe, \
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
+ }
+
+>>>>>>>
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+ 88200, 96000, 176400, 192000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+ .count = ARRAY_SIZE(supported_sample_rates),
+ .list = supported_sample_rates,
+ .mask = 0,
+};
+
+static void event_handler(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv)
+{
+ struct q6asm_dai_rtd *prtd = priv;
+ struct snd_pcm_substream *substream = prtd->substream;
+
+ switch (opcode) {
+ case ASM_CLIENT_EVENT_CMD_RUN_DONE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ break;
+ case ASM_CLIENT_EVENT_CMD_EOS_DONE:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ break;
+ case ASM_CLIENT_EVENT_DATA_WRITE_DONE: {
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+
+ break;
+ }
+ case ASM_CLIENT_EVENT_DATA_READ_DONE:
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_read(prtd->audio_client);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+<<<<<<<
+ struct q6asm_dai_data *pdata;
+ int ret, i;
+
+ pdata = q6asm_get_dai_data(soc_prtd->platform->dev);
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_data *pdata;
+ int ret, i;
+
+ pdata = snd_soc_component_get_drvdata(c);
+>>>>>>>
+ if (!pdata)
+ return -EINVAL;
+
+ if (!prtd || !prtd->audio_client) {
+ pr_err("%s: private data null or audio client freed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_irq_pos = 0;
+ /* rate and channels are sent to audio driver */
+ if (prtd->state) {
+ /* clear the previous setup if any */
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ }
+
+ ret = q6asm_map_memory_regions(substream->stream, prtd->audio_client,
+ prtd->phys,
+ (prtd->pcm_size / prtd->periods),
+ prtd->periods);
+
+ if (ret < 0) {
+ pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+ ret);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+<<<<<<<
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+ }
+
+=======
+ }else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+
+ }
+>>>>>>>
+ if (ret < 0) {
+ pr_err("%s: q6asm_open_write failed\n", __func__);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+
+ prtd->session_id = q6asm_get_session_id(prtd->audio_client);
+ ret = q6routing_stream_open(soc_prtd->dai_link->id, LEGACY_PCM_MODE,
+ prtd->session_id, substream->stream);
+ if (ret) {
+ pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+ return ret;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_media_format_block_multi_ch_pcm(
+ prtd->audio_client, runtime->rate,
+ runtime->channels, NULL,
+ prtd->bits_per_sample);
+<<<<<<<
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+=======
+ }else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+>>>>>>>
+ ret = q6asm_enc_cfg_blk_pcm_format_support(prtd->audio_client,
+ runtime->rate, runtime->channels,
+ prtd->bits_per_sample);
+
+ /* Queue the buffers */
+ for (i = 0; i < runtime->periods; i++)
+ q6asm_read(prtd->audio_client);
+
+ }
+ if (ret < 0)
+ pr_info("%s: CMD Format block failed\n", __func__);
+
+ prtd->state = Q6ASM_STREAM_RUNNING;
+
+ return 0;
+}
+
+static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int q6asm_dai_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = c->dev;
+=======
+
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = soc_prtd->platform->dev;
+>>>>>>>
+ int ret = 0;
+ int stream_id;
+
+ stream_id = cpu_dai->driver->id;
+
+<<<<<<<
+ pdata = q6asm_get_dai_data(dev);
+ if (!pdata) {
+ pr_err("Platform data not found ..\n");
+=======
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata) {
+ pr_err("Drv data not found ..\n");
+>>>>>>>
+ return -EINVAL;
+ }
+
+ prtd = kzalloc(sizeof(struct q6asm_dai_rtd), GFP_KERNEL);
+ if (prtd == NULL)
+ return -ENOMEM;
+
+ prtd->substream = substream;
+ prtd->audio_client = q6asm_audio_client_alloc(dev,
+<<<<<<<
+ (q6asm_cb)event_handler, prtd, stream_id);
+=======
+ (q6asm_cb)event_handler, prtd, stream_id,
+ LEGACY_PCM_MODE);
+>>>>>>>
+ if (!prtd->audio_client) {
+ pr_info("%s: Could not allocate memory\n", __func__);
+ kfree(prtd);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = q6asm_dai_hardware_playback;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw = q6asm_dai_hardware_capture;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_sample_rates);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_list failed\n");
+ /* Ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE,
+ PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes min max ret = %d\n",
+ ret);
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for period bytes step ret = %d\n",
+ ret);
+ }
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes step ret = %d\n",
+ ret);
+ }
+
+ runtime->private_data = prtd;
+
+ snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_playback);
+
+ runtime->dma_bytes = q6asm_dai_hardware_playback.buffer_bytes_max;
+
+
+ if (pdata->sid < 0)
+ prtd->phys = substream->dma_buffer.addr;
+ else
+ prtd->phys = substream->dma_buffer.addr | (pdata->sid << 32);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int q6asm_dai_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->audio_client) {
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+<<<<<<<
+=======
+ prtd->audio_client = NULL;
+>>>>>>>
+ }
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ kfree(prtd);
+ return 0;
+}
+
+static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->pcm_irq_pos >= prtd->pcm_size)
+ prtd->pcm_irq_pos = 0;
+
+ return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int q6asm_dai_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+<<<<<<<
+ struct device *dev = soc_prtd->platform->dev->parent;
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct device *dev = c->dev;
+>>>>>>>
+
+ return dma_mmap_coherent(dev, vma,
+ runtime->dma_area, runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ prtd->pcm_size = params_buffer_bytes(params);
+ prtd->periods = params_periods(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ prtd->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ prtd->bits_per_sample = 24;
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_pcm_ops q6asm_dai_ops = {
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .mmap = q6asm_dai_mmap,
+};
+
+static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm_substream *psubstream, *csubstream;
+<<<<<<<
+ struct of_phandle_args args;
+ struct device_node *node;
+ struct q6asm_dai_data *pdata;
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+>>>>>>>
+ struct snd_pcm *pcm = rtd->pcm;
+ struct device *dev;
+ int size, ret;
+
+<<<<<<<
+ dev = c->dev;
+ size = q6asm_dai_hardware_playback.buffer_bytes_max;
+ psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (psubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &psubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+ }
+
+ csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (csubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &csubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
+ }
+ }
+=======
+ dev = rtd->platform->dev->parent;
+ node = dev->of_node;
+ pdata = q6asm_get_dai_data(rtd->platform->dev);
+
+ ret = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
+ if (ret < 0)
+ pdata->sid = -1;
+ else
+ pdata->sid = args.args[0];
+
+ size = q6asm_dai_hardware_playback.buffer_bytes_max;
+
+ psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (psubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &psubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+ }
+
+ csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (csubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &csubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
+ }
+ }
+
+>>>>>>>
+
+ return ret;
+}
+
+static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
+ substream = pcm->streams[i].substream;
+ if (substream) {
+ snd_dma_free_pages(&substream->dma_buffer);
+ substream->dma_buffer.area = NULL;
+ substream->dma_buffer.addr = 0;
+ }
+ }
+}
+
+<<<<<<<
+=======
+static struct snd_soc_platform_driver q6asm_soc_platform = {
+ .ops = &q6asm_dai_ops,
+ .pcm_new = q6asm_dai_pcm_new,
+ .pcm_free = q6asm_dai_pcm_free,
+
+};
+
+>>>>>>>
+static const struct snd_soc_dapm_route afe_pcm_routes[] = {
+ {"MM_DL1", NULL, "MultiMedia1 Playback" },
+ {"MM_DL2", NULL, "MultiMedia2 Playback" },
+ {"MM_DL3", NULL, "MultiMedia3 Playback" },
+ {"MM_DL4", NULL, "MultiMedia4 Playback" },
+ {"MM_DL5", NULL, "MultiMedia5 Playback" },
+ {"MM_DL6", NULL, "MultiMedia6 Playback" },
+ {"MM_DL7", NULL, "MultiMedia7 Playback" },
+<<<<<<<
+ {"MM_DL7", NULL, "MultiMedia8 Playback" },
+ {"MultiMedia1 Capture", NULL, "MM_UL1"},
+ {"MultiMedia2 Capture", NULL, "MM_UL2"},
+ {"MultiMedia3 Capture", NULL, "MM_UL3"},
+ {"MultiMedia4 Capture", NULL, "MM_UL4"},
+ {"MultiMedia5 Capture", NULL, "MM_UL5"},
+ {"MultiMedia6 Capture", NULL, "MM_UL6"},
+ {"MultiMedia7 Capture", NULL, "MM_UL7"},
+ {"MultiMedia8 Capture", NULL, "MM_UL8"},
+=======
+ {"MultiMedia1 Capture" , NULL, "MM_UL1"},
+ {"MultiMedia2 Capture" , NULL, "MM_UL2"},
+ {"MultiMedia4 Capture" , NULL, "MM_UL4"},
+>>>>>>>
+
+};
+
+static int fe_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_context *dapm;
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+ snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
+ ARRAY_SIZE(afe_pcm_routes));
+
+ return 0;
+}
+
+<<<<<<<
+
+static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ .name = DRV_NAME,
+ .ops = &q6asm_dai_ops,
+ .pcm_new = q6asm_dai_pcm_new,
+ .pcm_free = q6asm_dai_pcm_free,
+
+};
+
+static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+ Q6ASM_FEDAI_DRIVER(1),
+ Q6ASM_FEDAI_DRIVER(2),
+ Q6ASM_FEDAI_DRIVER(3),
+ Q6ASM_FEDAI_DRIVER(4),
+ Q6ASM_FEDAI_DRIVER(5),
+ Q6ASM_FEDAI_DRIVER(6),
+ Q6ASM_FEDAI_DRIVER(7),
+ Q6ASM_FEDAI_DRIVER(8),
+};
+
+static int q6asm_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct q6asm_dai_data *pdata;
+ int rc;
+
+ pdata = kzalloc(sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ rc = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
+ if (rc < 0)
+ pdata->sid = -1;
+ else
+ pdata->sid = args.args[0] & SID_MASK_DEFAULT;
+
+ dev_set_drvdata(dev, pdata);
+
+ return snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ q6asm_fe_dais,
+ ARRAY_SIZE(q6asm_fe_dais));
+}
+static void q6asm_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6asm_dai_data *pdata = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(pdata);
+
+}
+
+static const struct component_ops q6asm_dai_comp_ops = {
+ .bind = q6asm_dai_bind,
+ .unbind = q6asm_dai_unbind,
+};
+
+static int q6asm_dai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6asm_dai_comp_ops);
+}
+
+static int q6asm_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6asm_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6asm_dai_platform_driver = {
+ .driver = {
+ .name = "q6asm-dai",
+ },
+ .probe = q6asm_dai_probe,
+ .remove = q6asm_dai_dev_remove,
+};
+module_platform_driver(q6asm_dai_platform_driver);
+=======
+static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ .name = "q6asm-fe-dai",
+};
+
+static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+ {
+ .playback = {
+ .stream_name = "MultiMedia1 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia1 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia1",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA1,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia2 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia2 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia2",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia3 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia3",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia4 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia4 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia4",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia5 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia5 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia5",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia6 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia6",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia7 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia7",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA7,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia8 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia8",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+ },
+};
+
+int q6asm_dai_probe(struct device *dev)
+{
+ struct q6asm_dai_data *pdata;
+ int rc;
+
+ pdata = devm_kzalloc(dev, sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+
+ q6asm_set_dai_data(dev, pdata);
+
+ rc = devm_snd_soc_register_platform(dev, &q6asm_soc_platform);
+ if (rc) {
+ dev_err(dev, "err_dai_platform\n");
+ return rc;
+ }
+
+ return devm_snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ q6asm_fe_dais,
+ ARRAY_SIZE(q6asm_fe_dais));
+}
+EXPORT_SYMBOL_GPL(q6asm_dai_probe);
+
+int q6asm_dai_remove(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6asm_dai_remove);
+>>>>>>>
+
+MODULE_DESCRIPTION("Q6ASM dai driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/thisimage b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/thisimage
new file mode 100644
index 0000000..a8d6c6f
--- /dev/null
+++ b/rr-cache/9f497fbd837e3e347df386df77071ea45b8db013/thisimage
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation
+ * Copyright (c) 2017, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+<<<<<<<
+=======
+#include <linux/component.h>
+#include <sound/soc.h>
+>>>>>>>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include "q6asm.h"
+#include "q6routing.h"
+#include "q6dsp-errno.h"
+
+<<<<<<<
+=======
+#define DRV_NAME "q6asm-fe-dai"
+
+>>>>>>>
+#define PLAYBACK_MIN_NUM_PERIODS 2
+#define PLAYBACK_MAX_NUM_PERIODS 8
+#define PLAYBACK_MAX_PERIOD_SIZE 65536
+#define PLAYBACK_MIN_PERIOD_SIZE 128
+#define CAPTURE_MIN_NUM_PERIODS 2
+#define CAPTURE_MAX_NUM_PERIODS 8
+#define CAPTURE_MAX_PERIOD_SIZE 4096
+#define CAPTURE_MIN_PERIOD_SIZE 320
+<<<<<<<
+=======
+#define SID_MASK_DEFAULT 0xF
+>>>>>>>
+
+enum stream_state {
+ Q6ASM_STREAM_IDLE = 0,
+ Q6ASM_STREAM_STOPPED,
+ Q6ASM_STREAM_RUNNING,
+};
+
+struct q6asm_dai_rtd {
+ struct snd_pcm_substream *substream;
+ phys_addr_t phys;
+ unsigned int pcm_size;
+ unsigned int pcm_count;
+ unsigned int pcm_irq_pos; /* IRQ position */
+ unsigned int periods;
+ uint16_t bits_per_sample;
+ uint16_t source; /* Encoding source bit mask */
+ struct audio_client *audio_client;
+ uint16_t session_id;
+ enum stream_state state;
+};
+
+struct q6asm_dai_data {
+ long long int sid;
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_capture = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 4,
+ .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
+ CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = (PLAYBACK_MAX_NUM_PERIODS *
+ PLAYBACK_MAX_PERIOD_SIZE),
+ .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
+ .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE,
+ .periods_min = PLAYBACK_MIN_NUM_PERIODS,
+ .periods_max = PLAYBACK_MAX_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
+<<<<<<<
+=======
+#define Q6ASM_FEDAI_DRIVER(num) { \
+ .playback = { \
+ .stream_name = "MultiMedia"#num" Playback", \
+ .rates = (SNDRV_PCM_RATE_8000_192000| \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 192000, \
+ }, \
+ .capture = { \
+ .stream_name = "MultiMedia"#num" Capture", \
+ .rates = (SNDRV_PCM_RATE_8000_48000| \
+ SNDRV_PCM_RATE_KNOT), \
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE), \
+ .channels_min = 1, \
+ .channels_max = 4, \
+ .rate_min = 8000, \
+ .rate_max = 48000, \
+ }, \
+ .name = "MultiMedia"#num, \
+ .probe = fe_dai_probe, \
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
+ }
+
+>>>>>>>
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+ 88200, 96000, 176400, 192000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+ .count = ARRAY_SIZE(supported_sample_rates),
+ .list = supported_sample_rates,
+ .mask = 0,
+};
+
+static void event_handler(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv)
+{
+ struct q6asm_dai_rtd *prtd = priv;
+ struct snd_pcm_substream *substream = prtd->substream;
+
+ switch (opcode) {
+ case ASM_CLIENT_EVENT_CMD_RUN_DONE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ break;
+ case ASM_CLIENT_EVENT_CMD_EOS_DONE:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ break;
+ case ASM_CLIENT_EVENT_DATA_WRITE_DONE: {
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+
+ break;
+ }
+ case ASM_CLIENT_EVENT_DATA_READ_DONE:
+ prtd->pcm_irq_pos += prtd->pcm_count;
+ snd_pcm_period_elapsed(substream);
+ if (prtd->state == Q6ASM_STREAM_RUNNING)
+ q6asm_read(prtd->audio_client);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+<<<<<<<
+ struct q6asm_dai_data *pdata;
+ int ret, i;
+
+ pdata = q6asm_get_dai_data(soc_prtd->platform->dev);
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_data *pdata;
+ int ret, i;
+
+ pdata = snd_soc_component_get_drvdata(c);
+>>>>>>>
+ if (!pdata)
+ return -EINVAL;
+
+ if (!prtd || !prtd->audio_client) {
+ pr_err("%s: private data null or audio client freed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_irq_pos = 0;
+ /* rate and channels are sent to audio driver */
+ if (prtd->state) {
+ /* clear the previous setup if any */
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ }
+
+ ret = q6asm_map_memory_regions(substream->stream, prtd->audio_client,
+ prtd->phys,
+ (prtd->pcm_size / prtd->periods),
+ prtd->periods);
+
+ if (ret < 0) {
+ pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+ ret);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+<<<<<<<
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+ }
+
+=======
+ }else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
+ prtd->bits_per_sample);
+
+ }
+>>>>>>>
+ if (ret < 0) {
+ pr_err("%s: q6asm_open_write failed\n", __func__);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+
+ prtd->session_id = q6asm_get_session_id(prtd->audio_client);
+ ret = q6routing_stream_open(soc_prtd->dai_link->id, LEGACY_PCM_MODE,
+ prtd->session_id, substream->stream);
+ if (ret) {
+ pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+ return ret;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = q6asm_media_format_block_multi_ch_pcm(
+ prtd->audio_client, runtime->rate,
+ runtime->channels, NULL,
+ prtd->bits_per_sample);
+<<<<<<<
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+=======
+ }else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+>>>>>>>
+ ret = q6asm_enc_cfg_blk_pcm_format_support(prtd->audio_client,
+ runtime->rate, runtime->channels,
+ prtd->bits_per_sample);
+
+ /* Queue the buffers */
+ for (i = 0; i < runtime->periods; i++)
+ q6asm_read(prtd->audio_client);
+
+ }
+ if (ret < 0)
+ pr_info("%s: CMD Format block failed\n", __func__);
+
+ prtd->state = Q6ASM_STREAM_RUNNING;
+
+ return 0;
+}
+
+static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int q6asm_dai_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = c->dev;
+=======
+
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = soc_prtd->platform->dev;
+>>>>>>>
+ int ret = 0;
+ int stream_id;
+
+ stream_id = cpu_dai->driver->id;
+
+<<<<<<<
+ pdata = q6asm_get_dai_data(dev);
+ if (!pdata) {
+ pr_err("Platform data not found ..\n");
+=======
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata) {
+ pr_err("Drv data not found ..\n");
+>>>>>>>
+ return -EINVAL;
+ }
+
+ prtd = kzalloc(sizeof(struct q6asm_dai_rtd), GFP_KERNEL);
+ if (prtd == NULL)
+ return -ENOMEM;
+
+ prtd->substream = substream;
+ prtd->audio_client = q6asm_audio_client_alloc(dev,
+<<<<<<<
+ (q6asm_cb)event_handler, prtd, stream_id);
+=======
+ (q6asm_cb)event_handler, prtd, stream_id,
+ LEGACY_PCM_MODE);
+>>>>>>>
+ if (!prtd->audio_client) {
+ pr_info("%s: Could not allocate memory\n", __func__);
+ kfree(prtd);
+ return -ENOMEM;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = q6asm_dai_hardware_playback;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw = q6asm_dai_hardware_capture;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_sample_rates);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_list failed\n");
+ /* Ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE,
+ PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes min max ret = %d\n",
+ ret);
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for period bytes step ret = %d\n",
+ ret);
+ }
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes step ret = %d\n",
+ ret);
+ }
+
+ runtime->private_data = prtd;
+
+ snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_playback);
+
+ runtime->dma_bytes = q6asm_dai_hardware_playback.buffer_bytes_max;
+
+
+ if (pdata->sid < 0)
+ prtd->phys = substream->dma_buffer.addr;
+ else
+ prtd->phys = substream->dma_buffer.addr | (pdata->sid << 32);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int q6asm_dai_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->audio_client) {
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_unmap_memory_regions(substream->stream,
+ prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+<<<<<<<
+=======
+ prtd->audio_client = NULL;
+>>>>>>>
+ }
+ q6routing_stream_close(soc_prtd->dai_link->id,
+ substream->stream);
+ kfree(prtd);
+ return 0;
+}
+
+static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->pcm_irq_pos >= prtd->pcm_size)
+ prtd->pcm_irq_pos = 0;
+
+ return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int q6asm_dai_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+<<<<<<<
+ struct device *dev = soc_prtd->platform->dev->parent;
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
+ struct device *dev = c->dev;
+>>>>>>>
+
+ return dma_mmap_coherent(dev, vma,
+ runtime->dma_area, runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ prtd->pcm_size = params_buffer_bytes(params);
+ prtd->periods = params_periods(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ prtd->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ prtd->bits_per_sample = 24;
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_pcm_ops q6asm_dai_ops = {
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .mmap = q6asm_dai_mmap,
+};
+
+static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm_substream *psubstream, *csubstream;
+<<<<<<<
+ struct of_phandle_args args;
+ struct device_node *node;
+ struct q6asm_dai_data *pdata;
+=======
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+>>>>>>>
+ struct snd_pcm *pcm = rtd->pcm;
+ struct device *dev;
+ int size, ret;
+
+<<<<<<<
+ dev = c->dev;
+ size = q6asm_dai_hardware_playback.buffer_bytes_max;
+ psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (psubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &psubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+ }
+
+ csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (csubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &csubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
+ }
+ }
+=======
+ dev = rtd->platform->dev->parent;
+ node = dev->of_node;
+ pdata = q6asm_get_dai_data(rtd->platform->dev);
+
+ ret = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
+ if (ret < 0)
+ pdata->sid = -1;
+ else
+ pdata->sid = args.args[0];
+
+ size = q6asm_dai_hardware_playback.buffer_bytes_max;
+
+ psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (psubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &psubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+ }
+
+ csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (csubstream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &csubstream->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
+ }
+ }
+
+>>>>>>>
+
+ return ret;
+}
+
+static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
+ substream = pcm->streams[i].substream;
+ if (substream) {
+ snd_dma_free_pages(&substream->dma_buffer);
+ substream->dma_buffer.area = NULL;
+ substream->dma_buffer.addr = 0;
+ }
+ }
+}
+
+<<<<<<<
+=======
+static struct snd_soc_platform_driver q6asm_soc_platform = {
+ .ops = &q6asm_dai_ops,
+ .pcm_new = q6asm_dai_pcm_new,
+ .pcm_free = q6asm_dai_pcm_free,
+
+};
+
+>>>>>>>
+static const struct snd_soc_dapm_route afe_pcm_routes[] = {
+ {"MM_DL1", NULL, "MultiMedia1 Playback" },
+ {"MM_DL2", NULL, "MultiMedia2 Playback" },
+ {"MM_DL3", NULL, "MultiMedia3 Playback" },
+ {"MM_DL4", NULL, "MultiMedia4 Playback" },
+ {"MM_DL5", NULL, "MultiMedia5 Playback" },
+ {"MM_DL6", NULL, "MultiMedia6 Playback" },
+ {"MM_DL7", NULL, "MultiMedia7 Playback" },
+<<<<<<<
+ {"MM_DL7", NULL, "MultiMedia8 Playback" },
+ {"MultiMedia1 Capture", NULL, "MM_UL1"},
+ {"MultiMedia2 Capture", NULL, "MM_UL2"},
+ {"MultiMedia3 Capture", NULL, "MM_UL3"},
+ {"MultiMedia4 Capture", NULL, "MM_UL4"},
+ {"MultiMedia5 Capture", NULL, "MM_UL5"},
+ {"MultiMedia6 Capture", NULL, "MM_UL6"},
+ {"MultiMedia7 Capture", NULL, "MM_UL7"},
+ {"MultiMedia8 Capture", NULL, "MM_UL8"},
+=======
+ {"MultiMedia1 Capture" , NULL, "MM_UL1"},
+ {"MultiMedia2 Capture" , NULL, "MM_UL2"},
+ {"MultiMedia4 Capture" , NULL, "MM_UL4"},
+>>>>>>>
+
+};
+
+static int fe_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_context *dapm;
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+ snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
+ ARRAY_SIZE(afe_pcm_routes));
+
+ return 0;
+}
+
+<<<<<<<
+
+static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ .name = DRV_NAME,
+ .ops = &q6asm_dai_ops,
+ .pcm_new = q6asm_dai_pcm_new,
+ .pcm_free = q6asm_dai_pcm_free,
+
+};
+
+static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+ Q6ASM_FEDAI_DRIVER(1),
+ Q6ASM_FEDAI_DRIVER(2),
+ Q6ASM_FEDAI_DRIVER(3),
+ Q6ASM_FEDAI_DRIVER(4),
+ Q6ASM_FEDAI_DRIVER(5),
+ Q6ASM_FEDAI_DRIVER(6),
+ Q6ASM_FEDAI_DRIVER(7),
+ Q6ASM_FEDAI_DRIVER(8),
+};
+
+static int q6asm_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct q6asm_dai_data *pdata;
+ int rc;
+
+ pdata = kzalloc(sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ rc = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
+ if (rc < 0)
+ pdata->sid = -1;
+ else
+ pdata->sid = args.args[0] & SID_MASK_DEFAULT;
+
+ dev_set_drvdata(dev, pdata);
+
+ return snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ q6asm_fe_dais,
+ ARRAY_SIZE(q6asm_fe_dais));
+}
+static void q6asm_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6asm_dai_data *pdata = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(pdata);
+
+}
+
+static const struct component_ops q6asm_dai_comp_ops = {
+ .bind = q6asm_dai_bind,
+ .unbind = q6asm_dai_unbind,
+};
+
+static int q6asm_dai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6asm_dai_comp_ops);
+}
+
+static int q6asm_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6asm_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6asm_dai_platform_driver = {
+ .driver = {
+ .name = "q6asm-dai",
+ },
+ .probe = q6asm_dai_probe,
+ .remove = q6asm_dai_dev_remove,
+};
+module_platform_driver(q6asm_dai_platform_driver);
+=======
+static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ .name = "q6asm-fe-dai",
+};
+
+static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+ {
+ .playback = {
+ .stream_name = "MultiMedia1 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia1 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia1",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA1,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia2 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia2 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia2",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia3 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia3",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia4 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia4 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia4",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia5 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia5 Capture",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "MultiMedia5",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia6 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia6",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia7 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia7",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA7,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia8 Playback",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "MultiMedia8",
+ .probe = fe_dai_probe,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+ },
+};
+
+int q6asm_dai_probe(struct device *dev)
+{
+ struct q6asm_dai_data *pdata;
+ int rc;
+
+ pdata = devm_kzalloc(dev, sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+
+ q6asm_set_dai_data(dev, pdata);
+
+ rc = devm_snd_soc_register_platform(dev, &q6asm_soc_platform);
+ if (rc) {
+ dev_err(dev, "err_dai_platform\n");
+ return rc;
+ }
+
+ return devm_snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ q6asm_fe_dais,
+ ARRAY_SIZE(q6asm_fe_dais));
+}
+EXPORT_SYMBOL_GPL(q6asm_dai_probe);
+
+int q6asm_dai_remove(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6asm_dai_remove);
+>>>>>>>
+
+MODULE_DESCRIPTION("Q6ASM dai driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/a6df22f29fac3dfcfddaa23d13d678c7fe51ce0c/preimage b/rr-cache/a6df22f29fac3dfcfddaa23d13d678c7fe51ce0c/preimage
new file mode 100644
index 0000000..86dddea
--- /dev/null
+++ b/rr-cache/a6df22f29fac3dfcfddaa23d13d678c7fe51ce0c/preimage
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+// serial@75b1000 {
+// label = "LS-UART0";
+// status = "okay";
+// pinctrl-names = "default", "sleep";
+// pinctrl-0 = <&blsp2_uart2_4pins_default>;
+// pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+// };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+<<<<<<<
+=======
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+>>>>>>>
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+<<<<<<<
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+=======
+ bt_en: bt-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "bt-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 19 0>;
+
+		/* BT chip specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+
+ agnoc@0 {
+ qcom,pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ vddpe1-supply = <&bt_en>;
+>>>>>>>
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+
+		slim_msm: sc {
+ tasha_codec: tas {
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default &audio_mclk>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
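
For illustration only (not part of the recorded resolution above): an additional
front-end link could be wired up the same way as the "MultiMedia1 Playback" node,
reusing one of the other DAIs the q6asm driver in this series registers
(MultiMedia6 here). The fe@2 node name, its unit address and the link-name are
assumptions that simply follow the existing pattern.

	fe@2 {
		is-fe;
		link-name = "MultiMedia6 Playback";
		cpu {
			/* front-end DAI provided by the q6asm driver above */
			sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA6>;
		};
		platform {
			sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA6>;
		};
	};
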
diff --git a/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/postimage.1 b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/postimage.1
new file mode 100644
index 0000000..1c0d06f
--- /dev/null
+++ b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/postimage.1
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&msmgpio {
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ blsp1_uart1_default: blsp1_uart1_default {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+};
diff --git a/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage
new file mode 100644
index 0000000..932385a
--- /dev/null
+++ b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&msmgpio {
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+<<<<<<<
+ blsp1_uart1_default: blsp1_uart1_default {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-disable;
+=======
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+>>>>>>>
+ };
+ };
+};
diff --git a/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage.1 b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage.1
new file mode 100644
index 0000000..932385a
--- /dev/null
+++ b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/preimage.1
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&msmgpio {
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+<<<<<<<
+ blsp1_uart1_default: blsp1_uart1_default {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-disable;
+=======
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+>>>>>>>
+ };
+ };
+};
diff --git a/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/thisimage.1 b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/thisimage.1
new file mode 100644
index 0000000..932385a
--- /dev/null
+++ b/rr-cache/a8dd18ed0fa01371cd5de7b73610e430e01651d8/thisimage.1
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&msmgpio {
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+<<<<<<<
+ blsp1_uart1_default: blsp1_uart1_default {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ drive-strength = <2>;
+ bias-disable;
+=======
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+>>>>>>>
+ };
+ };
+};
diff --git a/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/postimage b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/postimage
new file mode 100644
index 0000000..4105b1d
--- /dev/null
+++ b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/postimage
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+
+struct q6core_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+};
+
+bool q6core_is_adsp_ready(void);
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo);
+
+#endif /* __Q6CORE_H__ */
diff --git a/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/preimage b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/preimage
new file mode 100644
index 0000000..a11981e
--- /dev/null
+++ b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/preimage
@@ -0,0 +1,24 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+
+<<<<<<<
+bool q6core_is_adsp_ready(void);
+uint32_t q6core_get_svc_version(int svc_id);
+=======
+struct q6core_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+};
+
+bool q6core_is_adsp_ready(void);
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo);
+>>>>>>>
+
+#endif /* __Q6CORE_H__ */
diff --git a/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/thisimage b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/thisimage
new file mode 100644
index 0000000..a11981e
--- /dev/null
+++ b/rr-cache/a9a9ab3978419c7b94ec2d91c6577197d01e7fb8/thisimage
@@ -0,0 +1,24 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+
+<<<<<<<
+bool q6core_is_adsp_ready(void);
+uint32_t q6core_get_svc_version(int svc_id);
+=======
+struct q6core_svc_api_info {
+ uint32_t service_id;
+ uint32_t api_version;
+ uint32_t api_branch_version;
+};
+
+bool q6core_is_adsp_ready(void);
+int q6core_get_svc_api_info(int svc_id, struct q6core_svc_api_info *ainfo);
+>>>>>>>
+
+#endif /* __Q6CORE_H__ */
diff --git a/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/postimage b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/postimage
new file mode 100644
index 0000000..bdbf87d
--- /dev/null
+++ b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/postimage
@@ -0,0 +1,172 @@
+Qualcomm Audio Front End (Q6AFE) binding
+
+AFE is one of the APR audio services on the Q6DSP.
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by all apr services. Must contain the following properties.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+	Definition: must be "qcom,q6afe-v<MAJOR-NUMBER>.<MINOR-NUMBER>"
+		    or "qcom,q6afe", where the version number can be queried
+		    from the DSP.
+		    Example: "qcom,q6afe"
+
+= AFE DAIs (Digital Audio Interface)
+The "dais" subnode of the AFE node represents the AFE DAIs; each AFE DAI is a
+subnode of "dais" carrying the board-specific DAI setup.
+The "dais" node should have the following properties, followed by the DAI children.
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+== Each AFE DAI is a subnode of "dais" and represents one DAI; it carries the
+board-specific configuration of that DAI. Must contain the following properties.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Must be dai id
+
+- qcom,sd-lines
+ Usage: required for mi2s interface
+ Value type: <prop-encoded-array>
+	Definition: Must be the list of serial data lines used by this DAI;
+		    one or more of SD lines 1-4.
+
+ - qcom,tdm-sync-mode:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization mode.
+ 0 - Short sync bit mode
+ 1 - Long sync mode
+ 2 - Short sync slot mode
+
+ - qcom,tdm-sync-src:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization source.
+ 0 - External source
+ 1 - Internal source
+
+ - qcom,tdm-data-out:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Data out signal to drive with other masters.
+ 0 - Disable
+ 1 - Enable
+
+ - qcom,tdm-invert-sync:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Invert the sync.
+ 0 - Normal
+ 1 - Invert
+
+ - qcom,tdm-data-delay:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+		Definition: Number of bit-clock cycles to delay data
+			with respect to the sync edge.
+ 0 - 0 bit clock cycle
+ 1 - 1 bit clock cycle
+ 2 - 2 bit clock cycle
+
+ - qcom,tdm-data-align:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+		Definition: Indicates how data is packed within the slot,
+			for example a 32-bit slot width when the sample
+			bit width is 24.
+ 0 - MSB
+ 1 - LSB
+
+= EXAMPLE
+
+q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi@1 {
+ reg = <1>;
+ };
+
+ tdm@24 {
+ reg = <24>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+ qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+
+ };
+
+ tdm@25 {
+ reg = <25>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+			qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+ };
+
+ prim-mi2s-rx@16 {
+ reg = <16>;
+ qcom,sd-lines = <1 3>;
+ };
+
+ prim-mi2s-tx@17 {
+ reg = <17>;
+ qcom,sd-lines = <2>;
+ };
+
+ sec-mi2s-rx@18 {
+ reg = <18>;
+ qcom,sd-lines = <1 4>;
+ };
+
+ sec-mi2s-tx@19 {
+ reg = <19>;
+ qcom,sd-lines = <2>;
+ };
+
+ tert-mi2s-rx@20 {
+ reg = <20>;
+ qcom,sd-lines = <2 4>;
+ };
+
+ tert-mi2s-tx@21 {
+ reg = <21>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-rx@22 {
+ reg = <22>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-tx@23 {
+ reg = <23>;
+ qcom,sd-lines = <2>;
+ };
+ };
+};
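
As a usage sketch (the &q6afe label and the back-end node shape are taken from
the db820c dts earlier in this series, not from this binding, so treat them as
assumptions): a board references one of these AFE DAIs through the provider's
#sound-dai-cells = <1>, with the port id as the single cell.

	be@1 {
		link-name = "HDMI Playback";
		cpu {
			/* AFE_PORT_HDMI_RX comes from dt-bindings/sound/qcom,q6afe.h */
			sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
		};
	};
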
diff --git a/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/preimage b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/preimage
new file mode 100644
index 0000000..be43d3d
--- /dev/null
+++ b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/preimage
@@ -0,0 +1,208 @@
+Qualcomm Audio Front End (Q6AFE) binding
+
+AFE is one of the APR audio service on Q6DSP
+<<<<<<<
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by all apr services. Must contain the following properties.
+=======
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+- but must contain the following property:
+>>>>>>>
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,afe-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,afe-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 4 for Audio Front End Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "AFE"
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 1
+
+
+= EXAMPLE
+
+q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+=======
+ Definition: must be "qcom,q6afe-v<MAJOR-NUMBER>.<MINOR-NUMBER>"
+ Or "qcom,q6afe" where the version number can be queried
+ from DSP.
+ example "qcom,q6afe"
+
+= AFE DAIs (Digital Audio Interface)
+"dais" subnode of the AFE node. It represents afe dais, each afe dai is a
+subnode of "dais" representing board specific dai setup.
+"dais" node should have following properties followed by dai children.
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+== AFE DAI is subnode of "dais" and represent a dai, it includes board specific
+configuration of each dai. Must contain the following properties.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Must be dai id
+
+- qcom,sd-lines
+ Usage: required for mi2s interface
+ Value type: <prop-encoded-array>
+ Definition: Must be list of serial data lines used by this dai.
+ should be one or more of the 1-4 sd lines.
+
+ - qcom,tdm-sync-mode:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization mode.
+ 0 - Short sync bit mode
+ 1 - Long sync mode
+ 2 - Short sync slot mode
+
+ - qcom,tdm-sync-src:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization source.
+ 0 - External source
+ 1 - Internal source
+
+ - qcom,tdm-data-out:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Data out signal to drive with other masters.
+ 0 - Disable
+ 1 - Enable
+
+ - qcom,tdm-invert-sync:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Invert the sync.
+ 0 - Normal
+ 1 - Invert
+
+ - qcom,tdm-data-delay:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Number of bit clock to delay data
+ with respect to sync edge.
+ 0 - 0 bit clock cycle
+ 1 - 1 bit clock cycle
+ 2 - 2 bit clock cycle
+
+ - qcom,tdm-data-align:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Indicate how data is packed
+ within the slot. For example, 32 slot width in case of
+ sample bit width is 24.
+ 0 - MSB
+ 1 - LSB
+
+= EXAMPLE
+
+q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi@1 {
+ reg = <1>;
+ };
+
+ tdm@24 {
+ reg = <24>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+ qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+
+ };
+
+ tdm@25 {
+ reg = <25>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+			qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+ };
+
+ prim-mi2s-rx@16 {
+ reg = <16>;
+ qcom,sd-lines = <1 3>;
+ };
+
+ prim-mi2s-tx@17 {
+ reg = <17>;
+ qcom,sd-lines = <2>;
+ };
+
+ sec-mi2s-rx@18 {
+ reg = <18>;
+ qcom,sd-lines = <1 4>;
+ };
+
+ sec-mi2s-tx@19 {
+ reg = <19>;
+ qcom,sd-lines = <2>;
+ };
+
+ tert-mi2s-rx@20 {
+ reg = <20>;
+ qcom,sd-lines = <2 4>;
+ };
+
+ tert-mi2s-tx@21 {
+ reg = <21>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-rx@22 {
+ reg = <22>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-tx@23 {
+ reg = <23>;
+ qcom,sd-lines = <2>;
+ };
+ };
+>>>>>>>
+};
diff --git a/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/thisimage b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/thisimage
new file mode 100644
index 0000000..be43d3d
--- /dev/null
+++ b/rr-cache/aa3ac109aefb3e67b8ab599b710bba0447979d05/thisimage
@@ -0,0 +1,208 @@
+Qualcomm Audio Front End (Q6AFE) binding
+
+AFE is one of the APR audio service on Q6DSP
+<<<<<<<
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by all apr services. Must contain the following properties.
+=======
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+- but must contain the following property:
+>>>>>>>
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,afe-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,afe-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 4 for Audio Front End Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "AFE"
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 1
+
+
+= EXAMPLE
+
+q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+=======
+ Definition: must be "qcom,q6afe-v<MAJOR-NUMBER>.<MINOR-NUMBER>"
+ Or "qcom,q6afe" where the version number can be queried
+ from DSP.
+ example "qcom,q6afe"
+
+= AFE DAIs (Digital Audio Interface)
+"dais" subnode of the AFE node. It represents afe dais, each afe dai is a
+subnode of "dais" representing board specific dai setup.
+"dais" node should have following properties followed by dai children.
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+== AFE DAI is subnode of "dais" and represent a dai, it includes board specific
+configuration of each dai. Must contain the following properties.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Must be dai id
+
+- qcom,sd-lines
+ Usage: required for mi2s interface
+ Value type: <prop-encoded-array>
+ Definition: Must be list of serial data lines used by this dai.
+ should be one or more of the 1-4 sd lines.
+
+ - qcom,tdm-sync-mode:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization mode.
+ 0 - Short sync bit mode
+ 1 - Long sync mode
+ 2 - Short sync slot mode
+
+ - qcom,tdm-sync-src:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Synchronization source.
+ 0 - External source
+ 1 - Internal source
+
+ - qcom,tdm-data-out:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Data out signal to drive with other masters.
+ 0 - Disable
+ 1 - Enable
+
+ - qcom,tdm-invert-sync:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Invert the sync.
+ 0 - Normal
+ 1 - Invert
+
+ - qcom,tdm-data-delay:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Number of bit clock to delay data
+ with respect to sync edge.
+ 0 - 0 bit clock cycle
+ 1 - 1 bit clock cycle
+ 2 - 2 bit clock cycle
+
+ - qcom,tdm-data-align:
+ Usage: required for tdm interface
+ Value type: <prop-encoded-array>
+ Definition: Indicate how data is packed
+ within the slot. For example, 32 slot width in case of
+ sample bit width is 24.
+ 0 - MSB
+ 1 - LSB
+
+= EXAMPLE
+
+q6afe@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+
+ dais {
+ #sound-dai-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi@1 {
+ reg = <1>;
+ };
+
+ tdm@24 {
+ reg = <24>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+ qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+
+ };
+
+ tdm@25 {
+ reg = <25>;
+			qcom,tdm-sync-mode = <1>;
+ qcom,tdm-sync-src = <1>;
+ qcom,tdm-data-out = <0>;
+ qcom,tdm-invert-sync = <1>;
+			qcom,tdm-data-delay = <1>;
+ qcom,tdm-data-align = <0>;
+ };
+
+ prim-mi2s-rx@16 {
+ reg = <16>;
+ qcom,sd-lines = <1 3>;
+ };
+
+ prim-mi2s-tx@17 {
+ reg = <17>;
+ qcom,sd-lines = <2>;
+ };
+
+ sec-mi2s-rx@18 {
+ reg = <18>;
+ qcom,sd-lines = <1 4>;
+ };
+
+ sec-mi2s-tx@19 {
+ reg = <19>;
+ qcom,sd-lines = <2>;
+ };
+
+ tert-mi2s-rx@20 {
+ reg = <20>;
+ qcom,sd-lines = <2 4>;
+ };
+
+ tert-mi2s-tx@21 {
+ reg = <21>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-rx@22 {
+ reg = <22>;
+ qcom,sd-lines = <1>;
+ };
+
+ quat-mi2s-tx@23 {
+ reg = <23>;
+ qcom,sd-lines = <2>;
+ };
+ };
+>>>>>>>
+};
diff --git a/rr-cache/ad97e59e50cce01d2ea5e80599105a27ec6fafea/preimage b/rr-cache/ad97e59e50cce01d2ea5e80599105a27ec6fafea/preimage
new file mode 100644
index 0000000..e2dee01
--- /dev/null
+++ b/rr-cache/ad97e59e50cce01d2ea5e80599105a27ec6fafea/preimage
@@ -0,0 +1,1261 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/* UART specific GENI registers */
+#define SE_UART_TX_TRANS_CFG 0x25c
+#define SE_UART_TX_WORD_LEN 0x268
+#define SE_UART_TX_STOP_BIT_LEN 0x26c
+#define SE_UART_TX_TRANS_LEN 0x270
+#define SE_UART_RX_TRANS_CFG 0x280
+#define SE_UART_RX_WORD_LEN 0x28c
+#define SE_UART_RX_STALE_CNT 0x294
+#define SE_UART_TX_PARITY_CFG 0x2a4
+#define SE_UART_RX_PARITY_CFG 0x2a8
+
+/* SE_UART_TRANS_CFG */
+#define UART_TX_PAR_EN BIT(0)
+#define UART_CTS_MASK BIT(1)
+
+/* SE_UART_TX_WORD_LEN */
+#define TX_WORD_LEN_MSK GENMASK(9, 0)
+
+/* SE_UART_TX_STOP_BIT_LEN */
+#define TX_STOP_BIT_LEN_MSK GENMASK(23, 0)
+#define TX_STOP_BIT_LEN_1 0
+#define TX_STOP_BIT_LEN_1_5 1
+#define TX_STOP_BIT_LEN_2 2
+
+/* SE_UART_TX_TRANS_LEN */
+#define TX_TRANS_LEN_MSK GENMASK(23, 0)
+
+/* SE_UART_RX_TRANS_CFG */
+#define UART_RX_INS_STATUS_BIT BIT(2)
+#define UART_RX_PAR_EN BIT(3)
+
+/* SE_UART_RX_WORD_LEN */
+#define RX_WORD_LEN_MASK GENMASK(9, 0)
+
+/* SE_UART_RX_STALE_CNT */
+#define RX_STALE_CNT GENMASK(23, 0)
+
+/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */
+#define PAR_CALC_EN BIT(0)
+#define PAR_MODE_MSK GENMASK(2, 1)
+#define PAR_MODE_SHFT 1
+#define PAR_EVEN 0x00
+#define PAR_ODD 0x01
+#define PAR_SPACE 0x10
+#define PAR_MARK 0x11
+
+/* UART M_CMD OP codes */
+#define UART_START_TX 0x1
+#define UART_START_BREAK 0x4
+#define UART_STOP_BREAK 0x5
+/* UART S_CMD OP codes */
+#define UART_START_READ 0x1
+#define UART_PARAM 0x1
+
+#define UART_OVERSAMPLING 32
+#define STALE_TIMEOUT 16
+#define DEFAULT_BITS_PER_CHAR 10
+#define GENI_UART_CONS_PORTS 1
+#define DEF_FIFO_DEPTH_WORDS 16
+#define DEF_TX_WM 2
+#define DEF_FIFO_WIDTH_BITS 32
+#define UART_CONSOLE_RX_WM 2
+
+#ifdef CONFIG_CONSOLE_POLL
+#define RX_BYTES_PW 1
+#else
+#define RX_BYTES_PW 4
+#endif
+
+struct qcom_geni_serial_port {
+ struct uart_port uport;
+ struct geni_se se;
+ char name[20];
+ u32 tx_fifo_depth;
+ u32 tx_fifo_width;
+ u32 rx_fifo_depth;
+ u32 tx_wm;
+ u32 rx_wm;
+ u32 rx_rfr;
+ enum geni_se_xfer_mode xfer_mode;
+ bool setup;
+ int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ unsigned int baud;
+ unsigned int tx_bytes_pw;
+ unsigned int rx_bytes_pw;
+ bool brk;
+};
+
+static const struct uart_ops qcom_geni_console_pops;
+static struct uart_driver qcom_geni_console_driver;
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+static void qcom_geni_serial_stop_rx(struct uart_port *uport);
+
+static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000,
+ 96000000, 100000000};
+
+#define to_dev_port(ptr, member) \
+ container_of(ptr, struct qcom_geni_serial_port, member)
+
+static struct qcom_geni_serial_port qcom_geni_console_port = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_console_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+};
+
+static int qcom_geni_serial_request_port(struct uart_port *uport)
+{
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ uport->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(uport->membase))
+ return PTR_ERR(uport->membase);
+ port->se.base = uport->membase;
+ return 0;
+}
+
+static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
+{
+ if (cfg_flags & UART_CONFIG_TYPE) {
+ uport->type = PORT_MSM;
+ qcom_geni_serial_request_port(uport);
+ }
+}
+
+static unsigned int qcom_geni_cons_get_mctrl(struct uart_port *uport)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+static void qcom_geni_cons_set_mctrl(struct uart_port *uport,
+ unsigned int mctrl)
+{
+}
+
+static const char *qcom_geni_serial_get_type(struct uart_port *uport)
+{
+ return "MSM";
+}
+
+static struct qcom_geni_serial_port *get_port_from_line(int line)
+{
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return ERR_PTR(-ENXIO);
+ return &qcom_geni_console_port;
+}
+
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ int offset, int field, bool set)
+{
+ u32 reg;
+ struct qcom_geni_serial_port *port;
+ unsigned int baud;
+ unsigned int fifo_bits;
+ unsigned long timeout_us = 20000;
+
+ /* Ensure polling is not re-ordered before the prior writes/reads */
+ mb();
+
+ if (uport->private_data) {
+ port = to_dev_port(uport, uport);
+ baud = port->baud;
+ if (!baud)
+ baud = 115200;
+ fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+ /*
+ * Total polling iterations based on FIFO worth of bytes to be
+ * sent at current baud. Add a little fluff to the wait.
+ */
+ timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
+ }
+
+ /*
+	 * Use a custom implementation instead of readl_poll_timeout_atomic()
+	 * since ktime is not ready at the time of the early console.
+ */
+<<<<<<<
+=======
+ timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
+>>>>>>>
+ while (timeout_us) {
+ reg = readl_relaxed(uport->membase + offset);
+ if ((bool)(reg & field) == set)
+ return true;
+ udelay(10);
+ timeout_us -= 10;
+ }
+ return false;
+}
+
+static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
+{
+ u32 m_cmd;
+
+ writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ m_cmd = UART_START_TX << M_OPCODE_SHFT;
+ writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
+}
+
+static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
+{
+ int done;
+ u32 irq_clear = M_CMD_DONE_EN;
+
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_DONE_EN, true);
+ if (!done) {
+ writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
+ SE_GENI_M_CMD_CTRL_REG);
+ irq_clear |= M_CMD_ABORT_EN;
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ }
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+{
+ u32 irq_clear = S_CMD_DONE_EN | S_CMD_ABORT_EN;
+
+ writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_ABORT, false);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int qcom_geni_serial_get_char(struct uart_port *uport)
+{
+ u32 rx_fifo;
+ u32 status;
+
+ status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ /*
+	 * Ensure the writes to clear interrupts are not re-ordered after
+ * reading the data.
+ */
+ mb();
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ if (!(status & RX_FIFO_WC_MSK))
+ return NO_POLL_CHAR;
+
+ rx_fifo = readl(uport->membase + SE_GENI_RX_FIFOn);
+ return rx_fifo & 0xff;
+}
+
+static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ unsigned char c)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, 1);
+ WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true));
+ writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_poll_tx_done(uport);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
+{
+ writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
+}
+
+static void
+__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ unsigned int count)
+{
+ int i;
+ u32 bytes_to_send = count;
+
+ for (i = 0; i < count; i++) {
+ /*
+ * uart_console_write() adds a carriage return for each newline.
+ * Account for additional bytes to be written.
+ */
+ if (s[i] == '\n')
+ bytes_to_send++;
+ }
+
+ writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, bytes_to_send);
+ for (i = 0; i < count; ) {
+ size_t chars_to_write = 0;
+ size_t avail = DEF_FIFO_DEPTH_WORDS - DEF_TX_WM;
+
+ /*
+		 * If the WM bit was never set, then the TX state machine is not
+		 * in a valid state, so break out and cancel/abort any existing
+		 * command. Unfortunately the data currently being written is
+		 * lost.
+ */
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true))
+ break;
+ chars_to_write = min_t(size_t, count - i, avail / 2);
+ uart_console_write(uport, s + i, chars_to_write,
+ qcom_geni_serial_wr_char);
+ writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ i += chars_to_write;
+ }
+ qcom_geni_serial_poll_tx_done(uport);
+}
+
+static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+ bool locked = true;
+ unsigned long flags;
+
+ WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port))
+ return;
+
+ uport = &port->uport;
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&uport->lock, flags);
+ else
+ spin_lock_irqsave(&uport->lock, flags);
+
+ /* Cancel the current write to log the fault */
+ if (!locked) {
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+
+ __qcom_geni_serial_console_write(uport, s, count);
+ if (locked)
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ u32 i;
+ unsigned char buf[sizeof(u32)];
+ struct tty_port *tport;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ tport = &uport->state->port;
+ for (i = 0; i < bytes; ) {
+ int c;
+ int chunk = min_t(int, bytes - i, port->rx_bytes_pw);
+
+ ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
+ i += chunk;
+ if (drop)
+ continue;
+
+ for (c = 0; c < chunk; c++) {
+ int sysrq;
+
+ uport->icount.rx++;
+ if (port->brk && buf[c] == 0) {
+ port->brk = false;
+ if (uart_handle_break(uport))
+ continue;
+ }
+
+ sysrq = uart_handle_sysrq_char(uport, buf[c]);
+ if (!sysrq)
+ tty_insert_flip_char(tport, buf[c], TTY_NORMAL);
+ }
+ }
+ if (!drop)
+ tty_flip_buffer_push(tport);
+ return 0;
+}
+#else
+static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+{
+ return -EPERM;
+}
+
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_start_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 status;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ /*
+ * readl ensures reading & writing of IRQ_EN register
+ * is not re-ordered before checking the status of the
+ * Serial Engine.
+ */
+ status = readl(uport->membase + SE_GENI_STATUS);
+ if (status & M_GENI_CMD_ACTIVE)
+ return;
+
+ if (!qcom_geni_serial_tx_empty(uport))
+ return;
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
+
+ writel_relaxed(port->tx_wm, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~M_CMD_DONE_EN;
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en &= ~M_TX_FIFO_WATERMARK_EN;
+ writel_relaxed(0, uport->membase +
+ SE_GENI_TX_WATERMARK_REG);
+ }
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+	/* stop_tx may be called multiple times. */
+ if (!(status & M_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Primary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_m_cmd(&port->se);
+ if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_start_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_stop_rx(uport);
+
+ /*
+ * Ensure setup command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+}
+
+static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+{
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 irq_clear = S_CMD_DONE_EN;
+
+ if (port->xfer_mode == GENI_SE_FIFO) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+	/* stop_rx may be called multiple times. */
+ if (!(status & S_GENI_CMD_ACTIVE))
+ return;
+
+ /*
+ * Ensure cancel command write is not re-ordered before checking
+ * the status of the Secondary Sequencer.
+ */
+ mb();
+
+ geni_se_cancel_s_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_CANCEL, false);
+ status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_abort_rx(uport);
+}
+
+static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
+{
+ u32 status;
+ u32 word_cnt;
+ u32 last_word_byte_cnt;
+ u32 last_word_partial;
+ u32 total_bytes;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ last_word_partial = status & RX_LAST;
+ last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
+
+ if (!word_cnt)
+ return;
+ total_bytes = port->rx_bytes_pw * (word_cnt - 1);
+ if (last_word_partial && last_word_byte_cnt)
+ total_bytes += last_word_byte_cnt;
+ else
+ total_bytes += port->rx_bytes_pw;
+ port->handle_rx(uport, total_bytes, drop);
+}
+
+static void qcom_geni_serial_handle_tx(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ size_t avail;
+ size_t remaining;
+ int i;
+ u32 status;
+ unsigned int chunk;
+ int tail;
+
+ chunk = uart_circ_chars_pending(xmit);
+ status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
+ /* Both FIFO and framework buffer are drained */
+ if (!chunk && !status) {
+ qcom_geni_serial_stop_tx(uport);
+ goto out_write_wakeup;
+ }
+
+ avail = (port->tx_fifo_depth - port->tx_wm) * port->tx_bytes_pw;
+<<<<<<<
+ tail = (xmit->tail + port->xmit_size) & (UART_XMIT_SIZE - 1);
+ chunk = min3((size_t)chunk, UART_XMIT_SIZE - tail, avail);
+=======
+ tail = xmit->tail;
+ chunk = min3((size_t)chunk, (size_t)(UART_XMIT_SIZE - tail), avail);
+>>>>>>>
+ if (!chunk)
+ goto out_write_wakeup;
+
+ qcom_geni_serial_setup_tx(uport, chunk);
+
+ remaining = chunk;
+ for (i = 0; i < chunk; ) {
+ unsigned int tx_bytes;
+ u8 buf[sizeof(u32)];
+ int c;
+
+ memset(buf, 0, ARRAY_SIZE(buf));
+ tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
+ for (c = 0; c < tx_bytes ; c++)
+ buf[c] = xmit->buf[tail + c];
+
+ iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
+
+ i += tx_bytes;
+ tail += tx_bytes;
+ uport->icount.tx += tx_bytes;
+ remaining -= tx_bytes;
+ }
+
+ xmit->tail = tail & (UART_XMIT_SIZE - 1);
+ qcom_geni_serial_poll_tx_done(uport);
+out_write_wakeup:
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+}
+
+static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
+{
+ unsigned int m_irq_status;
+ unsigned int s_irq_status;
+ struct uart_port *uport = dev;
+ unsigned long flags;
+ unsigned int m_irq_en;
+ bool drop_rx = false;
+ struct tty_port *tport = &uport->state->port;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (uport->suspended)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+ s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+ m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
+ goto out_unlock;
+
+ if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
+ uport->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ }
+
+ if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
+ m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ qcom_geni_serial_handle_tx(uport);
+
+ if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) {
+ if (s_irq_status & S_GP_IRQ_0_EN)
+ uport->icount.parity++;
+ drop_rx = true;
+ } else if (s_irq_status & S_GP_IRQ_2_EN ||
+ s_irq_status & S_GP_IRQ_3_EN) {
+ uport->icount.brk++;
+ port->brk = true;
+ }
+
+ if (s_irq_status & S_RX_FIFO_WATERMARK_EN ||
+ s_irq_status & S_RX_FIFO_LAST_EN)
+ qcom_geni_serial_handle_rx(uport, drop_rx);
+
+out_unlock:
+ spin_unlock_irqrestore(&uport->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+{
+ struct uart_port *uport;
+
+ uport = &port->uport;
+ port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
+ port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se);
+ port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
+ uport->fifosize =
+ (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+}
+
+static void set_rfr_wm(struct qcom_geni_serial_port *port)
+{
+ /*
+ * Set RFR (Flow off) to FIFO_DEPTH - 2.
+ * RX WM level at 10% RX_FIFO_DEPTH.
+ * TX WM level at 10% TX_FIFO_DEPTH.
+ */
+ port->rx_rfr = port->rx_fifo_depth - 2;
+ port->rx_wm = UART_CONSOLE_RX_WM;
+ port->tx_wm = DEF_TX_WM;
+}
+
+static void qcom_geni_serial_shutdown(struct uart_port *uport)
+{
+ unsigned long flags;
+
+ /* Stop the console before stopping the current tx */
+ console_stop(uport->cons);
+
+ free_irq(uport->irq, uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ qcom_geni_serial_stop_tx(uport);
+ qcom_geni_serial_stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int qcom_geni_serial_port_setup(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+
+ set_rfr_wm(port);
+ writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+ * it else we could end up in data loss scenarios.
+ */
+ port->xfer_mode = GENI_SE_FIFO;
+ qcom_geni_serial_poll_tx_done(uport);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
+ false, true, false);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
+ false, false, true);
+ geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
+ geni_se_select_mode(&port->se, port->xfer_mode);
+ port->setup = true;
+ return 0;
+}
+
+static int qcom_geni_serial_startup(struct uart_port *uport)
+{
+ int ret;
+ u32 proto;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ scnprintf(port->name, sizeof(port->name),
+ "qcom_serial_geni%d", uport->line);
+
+ proto = geni_se_read_proto(&port->se);
+ if (proto != GENI_SE_UART) {
+ dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
+ return -ENXIO;
+ }
+
+ get_tx_fifo_size(port);
+ if (!port->setup) {
+ ret = qcom_geni_serial_port_setup(uport);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
+ port->name, uport);
+ if (ret)
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+}
+
+static unsigned long get_clk_cfg(unsigned long clk_freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
+ if (!(root_freq[i] % clk_freq))
+ return root_freq[i];
+ }
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(unsigned int baud, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * UART_OVERSAMPLING;
+ ser_clk = get_clk_cfg(desired_clk);
+ if (!ser_clk) {
+ pr_err("%s: Can't find matching DFS entry for baud %d\n",
+ __func__, baud);
+ return ser_clk;
+ }
+
+ *clk_div = ser_clk / desired_clk;
+ return ser_clk;
+}
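+
+/*
+ * Worked example (illustrative, with assumed values): for 115200 baud and an
+ * oversampling factor of 32 (UART_OVERSAMPLING is assumed to be 32 here), the
+ * desired serial-engine clock is 115200 * 32 = 3686400 Hz. Assuming the
+ * root_freq table contains 7372800 Hz, get_clk_cfg() returns that entry since
+ * it divides evenly, giving *clk_div = 7372800 / 3686400 = 2; set_termios()
+ * then programs SER_CLK_EN | (2 << CLK_DIV_SHFT) into the M/S clock config
+ * registers.
+ */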
+
+static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int bits_per_char;
+ unsigned int tx_trans_cfg;
+ unsigned int tx_parity_cfg;
+ unsigned int rx_trans_cfg;
+ unsigned int rx_parity_cfg;
+ unsigned int stop_bit_len;
+ unsigned int clk_div;
+ unsigned long ser_clk_cfg;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ unsigned long clk_rate;
+
+ qcom_geni_serial_stop_rx(uport);
+ /* baud rate */
+ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+ port->baud = baud;
+ clk_rate = get_clk_div_rate(baud, &clk_div);
+ if (!clk_rate)
+ goto out_restart_rx;
+
+ uport->uartclk = clk_rate;
+ clk_set_rate(port->se.clk, clk_rate);
+ ser_clk_cfg = SER_CLK_EN;
+ ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+
+ /* parity */
+ tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
+ tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
+ rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
+ rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
+ if (termios->c_cflag & PARENB) {
+ tx_trans_cfg |= UART_TX_PAR_EN;
+ rx_trans_cfg |= UART_RX_PAR_EN;
+ tx_parity_cfg |= PAR_CALC_EN;
+ rx_parity_cfg |= PAR_CALC_EN;
+ if (termios->c_cflag & PARODD) {
+ tx_parity_cfg |= PAR_ODD;
+ rx_parity_cfg |= PAR_ODD;
+ } else if (termios->c_cflag & CMSPAR) {
+ tx_parity_cfg |= PAR_SPACE;
+ rx_parity_cfg |= PAR_SPACE;
+ } else {
+ tx_parity_cfg |= PAR_EVEN;
+ rx_parity_cfg |= PAR_EVEN;
+ }
+ } else {
+ tx_trans_cfg &= ~UART_TX_PAR_EN;
+ rx_trans_cfg &= ~UART_RX_PAR_EN;
+ tx_parity_cfg &= ~PAR_CALC_EN;
+ rx_parity_cfg &= ~PAR_CALC_EN;
+ }
+
+ /* bits per char */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits_per_char = 5;
+ break;
+ case CS6:
+ bits_per_char = 6;
+ break;
+ case CS7:
+ bits_per_char = 7;
+ break;
+ case CS8:
+ default:
+ bits_per_char = 8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ stop_bit_len = TX_STOP_BIT_LEN_2;
+ else
+ stop_bit_len = TX_STOP_BIT_LEN_1;
+
+ /* flow control, clear the CTS_MASK bit if using flow control. */
+ if (termios->c_cflag & CRTSCTS)
+ tx_trans_cfg &= ~UART_CTS_MASK;
+ else
+ tx_trans_cfg |= UART_CTS_MASK;
+
+ if (baud)
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+out_restart_rx:
+ qcom_geni_serial_start_rx(uport);
+}
+
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
+{
+ return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
+}
+
+#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static int __init qcom_geni_console_setup(struct console *co, char *options)
+{
+ struct uart_port *uport;
+ struct qcom_geni_serial_port *port;
+	int baud = 9600;	/* default baud if no options are supplied */
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= GENI_UART_CONS_PORTS || co->index < 0)
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+ if (IS_ERR(port)) {
+ pr_err("Invalid line %d\n", co->index);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+
+ if (unlikely(!uport->membase))
+ return -ENXIO;
+
+ if (geni_se_resources_on(&port->se)) {
+ dev_err(port->se.dev, "Error turning on resources\n");
+ return -ENXIO;
+ }
+
+ if (unlikely(geni_se_read_proto(&port->se) != GENI_SE_UART)) {
+ geni_se_resources_off(&port->se);
+ return -ENXIO;
+ }
+
+ if (!port->setup) {
+ port->tx_bytes_pw = 1;
+ port->rx_bytes_pw = RX_BYTES_PW;
+ qcom_geni_serial_stop_rx(uport);
+ qcom_geni_serial_port_setup(uport);
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static void qcom_geni_serial_earlycon_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ __qcom_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ const char *opt)
+{
+ struct uart_port *uport = &dev->port;
+ u32 tx_trans_cfg;
+ u32 tx_parity_cfg = 0; /* Disable Tx Parity */
+ u32 rx_trans_cfg = 0;
+ u32 rx_parity_cfg = 0; /* Disable Rx Parity */
+ u32 stop_bit_len = 0; /* Default stop bit length - 1 bit */
+ u32 bits_per_char;
+<<<<<<<
+=======
+ u32 ser_clk_cfg;
+ u32 baud = 115200;
+ unsigned int clk_div;
+ unsigned long clk_rate;
+>>>>>>>
+ struct geni_se se;
+
+ if (!uport->membase)
+ return -EINVAL;
+
+ memset(&se, 0, sizeof(se));
+ se.base = uport->membase;
+ if (geni_se_read_proto(&se) != GENI_SE_UART)
+ return -ENXIO;
+<<<<<<<
+=======
+
+>>>>>>>
+ /*
+ * Ignore Flow control.
+ * n = 8.
+ */
+ tx_trans_cfg = UART_CTS_MASK;
+ bits_per_char = BITS_PER_BYTE;
+
+<<<<<<<
+=======
+ clk_rate = get_clk_div_rate(baud, &clk_div);
+ if (!clk_rate)
+ return -EINVAL;
+
+ ser_clk_cfg = SER_CLK_EN | (clk_div << CLK_DIV_SHFT);
+>>>>>>>
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+	 * it, otherwise we could end up in data loss scenarios.
+ */
+ qcom_geni_serial_poll_tx_done(uport);
+ qcom_geni_serial_abort_rx(uport);
+ geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
+ geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
+ geni_se_select_mode(&se, GENI_SE_FIFO);
+
+ writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+<<<<<<<
+=======
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+>>>>>>>
+
+ dev->con->write = qcom_geni_serial_earlycon_write;
+ dev->con->setup = NULL;
+ return 0;
+}
+<<<<<<<
+OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart",
+=======
+OF_EARLYCON_DECLARE(qcom_serial, "qcom,geni-debug-uart",
+>>>>>>>
+ qcom_geni_serial_earlycon_setup);
+
+static int __init console_register(struct uart_driver *drv)
+{
+ return uart_register_driver(drv);
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+ uart_unregister_driver(drv);
+}
+
+static struct console cons_ops = {
+ .name = "ttyMSM",
+ .write = qcom_geni_serial_console_write,
+ .device = uart_console_device,
+ .setup = qcom_geni_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &qcom_geni_console_driver,
+};
+
+static struct uart_driver qcom_geni_console_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "qcom_geni_console",
+ .dev_name = "ttyMSM",
+ .nr = GENI_UART_CONS_PORTS,
+ .cons = &cons_ops,
+};
+#else
+static int console_register(struct uart_driver *drv)
+{
+ return 0;
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+}
+#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+
+static void qcom_geni_serial_cons_pm(struct uart_port *uport,
+ unsigned int new_state, unsigned int old_state)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+ if (unlikely(!uart_console(uport)))
+ return;
+
+ if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+ geni_se_resources_on(&port->se);
+ else if (new_state == UART_PM_STATE_OFF &&
+ old_state == UART_PM_STATE_ON)
+ geni_se_resources_off(&port->se);
+}
+
+static const struct uart_ops qcom_geni_console_pops = {
+ .tx_empty = qcom_geni_serial_tx_empty,
+ .stop_tx = qcom_geni_serial_stop_tx,
+ .start_tx = qcom_geni_serial_start_tx,
+ .stop_rx = qcom_geni_serial_stop_rx,
+ .set_termios = qcom_geni_serial_set_termios,
+ .startup = qcom_geni_serial_startup,
+ .request_port = qcom_geni_serial_request_port,
+ .config_port = qcom_geni_serial_config_port,
+ .shutdown = qcom_geni_serial_shutdown,
+ .type = qcom_geni_serial_get_type,
+ .set_mctrl = qcom_geni_cons_set_mctrl,
+ .get_mctrl = qcom_geni_cons_get_mctrl,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = qcom_geni_serial_get_char,
+ .poll_put_char = qcom_geni_serial_poll_put_char,
+#endif
+ .pm = qcom_geni_serial_cons_pm,
+};
+
+static int qcom_geni_serial_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int line = -1;
+ struct qcom_geni_serial_port *port;
+ struct uart_port *uport;
+ struct resource *res;
+ int irq;
+
+ if (pdev->dev.of_node)
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ return -ENXIO;
+ port = get_port_from_line(line);
+ if (IS_ERR(port)) {
+ dev_err(&pdev->dev, "Invalid line %d\n", line);
+ return PTR_ERR(port);
+ }
+
+ uport = &port->uport;
+ /* Don't allow 2 drivers to access the same port */
+ if (uport->private_data)
+ return -ENODEV;
+
+ uport->dev = &pdev->dev;
+ port->se.dev = &pdev->dev;
+ port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ port->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(port->se.clk)) {
+ ret = PTR_ERR(port->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ uport->mapbase = res->start;
+
+ port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ return irq;
+ }
+ uport->irq = irq;
+
+ uport->private_data = &qcom_geni_console_driver;
+ platform_set_drvdata(pdev, port);
+ port->handle_rx = handle_rx_console;
+ return uart_add_one_port(&qcom_geni_console_driver, uport);
+}
+
+static int qcom_geni_serial_remove(struct platform_device *pdev)
+{
+ struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+ struct uart_driver *drv = port->uport.private_data;
+
+ uart_remove_one_port(drv, &port->uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_suspend_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ uart_suspend_port(uport->private_data, uport);
+ return 0;
+}
+
+static int __maybe_unused qcom_geni_serial_sys_resume_noirq(struct device *dev)
+{
+ struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+ struct uart_port *uport = &port->uport;
+
+ if (console_suspend_enabled && uport->suspended) {
+ uart_resume_port(uport->private_data, uport);
+ /*
+ * uart_suspend_port() invokes port shutdown which in turn
+ * frees the irq. uart_resume_port invokes port startup which
+ * performs request_irq. The request_irq auto-enables the IRQ.
+ * In addition, resume_noirq implicitly enables the IRQ and
+ * leads to an unbalanced IRQ enable warning. Disable the IRQ
+ * before returning so that the warning is suppressed.
+ */
+ disable_irq(uport->irq);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend_noirq,
+ qcom_geni_serial_sys_resume_noirq)
+};
+
+static const struct of_device_id qcom_geni_serial_match_table[] = {
+ { .compatible = "qcom,geni-debug-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
+
+static struct platform_driver qcom_geni_serial_platform_driver = {
+ .remove = qcom_geni_serial_remove,
+ .probe = qcom_geni_serial_probe,
+ .driver = {
+ .name = "qcom_geni_serial",
+ .of_match_table = qcom_geni_serial_match_table,
+ .pm = &qcom_geni_serial_pm_ops,
+ },
+};
+
+static int __init qcom_geni_serial_init(void)
+{
+ int ret;
+
+ ret = console_register(&qcom_geni_console_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&qcom_geni_serial_platform_driver);
+ if (ret)
+ console_unregister(&qcom_geni_console_driver);
+ return ret;
+}
+module_init(qcom_geni_serial_init);
+
+static void __exit qcom_geni_serial_exit(void)
+{
+ platform_driver_unregister(&qcom_geni_serial_platform_driver);
+ console_unregister(&qcom_geni_console_driver);
+}
+module_exit(qcom_geni_serial_exit);
+
+MODULE_DESCRIPTION("Serial driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/aeafc43e3cc81632ac1fba082712d9e41209fb06/preimage b/rr-cache/aeafc43e3cc81632ac1fba082712d9e41209fb06/preimage
new file mode 100644
index 0000000..58e8f63
--- /dev/null
+++ b/rr-cache/aeafc43e3cc81632ac1fba082712d9e41209fb06/preimage
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+ ls_exp_gpio_f: pm8994_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ output-low;
+ power-source = <2>; // PM8994_GPIO_S4, 1.8V
+ };
+ };
+
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+ wlan_en_gpios: wlan_en_gpios {
+ pinconf {
+ pins = "gpio8";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+<<<<<<<
+ qcom,drive-strength = <3>;
+=======
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+>>>>>>>
+ bias-pull-down;
+ };
+ };
+
+ volume_up_gpio: pm8996_gpio2 {
+ pinconf {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ drive-push-pull;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+
+ bias-disable;
+ power-source = <PM8994_GPIO_S4>;
+ };
+ };
+
+ usb3_vbus_det_gpio: pm8996_gpio22 {
+ pinconf {
+ pins = "gpio22";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
+
+&pmi8994_gpios {
+ usb2_vbus_det_gpio: pmi8996_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+};
diff --git a/rr-cache/b99949bc1ff147a83e2e82fb563d2bcdc8be2c85/preimage b/rr-cache/b99949bc1ff147a83e2e82fb563d2bcdc8be2c85/preimage
new file mode 100644
index 0000000..2bfeff7
--- /dev/null
+++ b/rr-cache/b99949bc1ff147a83e2e82fb563d2bcdc8be2c85/preimage
@@ -0,0 +1,1737 @@
+/*
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/interconnect.h>
+#include <linux/iopoll.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define CORE_MCI_VERSION 0x50
+#define CORE_VERSION_MAJOR_SHIFT 28
+#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
+#define CORE_VERSION_MINOR_MASK 0xff
+
+#define CORE_MCI_GENERICS 0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
+
+#define CORE_HC_MODE 0x78
+#define HC_MODE_EN 0x1
+#define CORE_POWER 0x0
+#define CORE_SW_RST BIT(7)
+#define FF_CLK_SW_RST_DIS BIT(13)
+
+#define CORE_PWRCTL_STATUS 0xdc
+#define CORE_PWRCTL_MASK 0xe0
+#define CORE_PWRCTL_CLEAR 0xe4
+#define CORE_PWRCTL_CTL 0xe8
+#define CORE_PWRCTL_BUS_OFF BIT(0)
+#define CORE_PWRCTL_BUS_ON BIT(1)
+#define CORE_PWRCTL_IO_LOW BIT(2)
+#define CORE_PWRCTL_IO_HIGH BIT(3)
+#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
+#define CORE_PWRCTL_IO_SUCCESS BIT(2)
+#define REQ_BUS_OFF BIT(0)
+#define REQ_BUS_ON BIT(1)
+#define REQ_IO_LOW BIT(2)
+#define REQ_IO_HIGH BIT(3)
+#define INT_MASK 0xf
+#define MAX_PHASES 16
+#define CORE_DLL_LOCK BIT(7)
+#define CORE_DDR_DLL_LOCK BIT(11)
+#define CORE_DLL_EN BIT(16)
+#define CORE_CDR_EN BIT(17)
+#define CORE_CK_OUT_EN BIT(18)
+#define CORE_CDR_EXT_EN BIT(19)
+#define CORE_DLL_PDN BIT(29)
+#define CORE_DLL_RST BIT(30)
+#define CORE_DLL_CONFIG 0x100
+#define CORE_CMD_DAT_TRACK_SEL BIT(0)
+#define CORE_DLL_STATUS 0x108
+
+#define CORE_DLL_CONFIG_2 0x1b4
+#define CORE_DDR_CAL_EN BIT(0)
+#define CORE_FLL_CYCLE_CNT BIT(18)
+#define CORE_DLL_CLOCK_DISABLE BIT(21)
+
+#define CORE_VENDOR_SPEC 0x10c
+#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
+#define CORE_CLK_PWRSAVE BIT(1)
+#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK (3 << 8)
+#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
+#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
+#define CORE_HC_SELECT_IN_EN BIT(18)
+#define CORE_HC_SELECT_IN_HS400 (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+
+#define CORE_3_0V_SUPPORT (1 << 25)
+#define CORE_1_8V_SUPPORT (1 << 26)
+#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
+
+#define CORE_CSR_CDC_CTLR_CFG0 0x130
+#define CORE_SW_TRIG_FULL_CALIB BIT(16)
+#define CORE_HW_AUTOCAL_ENA BIT(17)
+
+#define CORE_CSR_CDC_CTLR_CFG1 0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
+#define CORE_TIMER_ENA BIT(16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
+#define CORE_CDC_OFFSET_CFG 0x14C
+#define CORE_CSR_CDC_DELAY_CFG 0x150
+#define CORE_CDC_SLAVE_DDA_CFG 0x160
+#define CORE_CSR_CDC_STATUS0 0x164
+#define CORE_CALIBRATION_DONE BIT(0)
+
+#define CORE_CDC_ERROR_CODE_MASK 0x7000000
+
+#define CORE_CSR_CDC_GEN_CFG 0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
+#define CORE_CDC_SWITCH_RC_EN BIT(1)
+
+#define CORE_DDR_200_CFG 0x184
+#define CORE_CDC_T4_DLY_SEL BIT(0)
+#define CORE_CMDIN_RCLK_EN BIT(1)
+#define CORE_START_CDC_TRAFFIC BIT(6)
+#define CORE_VENDOR_SPEC3 0x1b0
+#define CORE_PWRSAVE_DLL BIT(3)
+
+#define CORE_DDR_CONFIG 0x1b8
+#define DDR_CONFIG_POR_VAL 0x80040853
+
+#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
+
+#define INVALID_TUNING_PHASE -1
+#define SDHCI_MSM_MIN_CLOCK 400000
+#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
+
+#define CDR_SELEXT_SHIFT 20
+#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
+#define CMUX_SHIFT_PHASE_SHIFT 24
+#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+
+#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
+struct sdhci_msm_host {
+ struct platform_device *pdev;
+ void __iomem *core_mem; /* MSM SDCC mapped address */
+ int pwr_irq; /* power irq */
+ struct clk *bus_clk; /* SDHC bus voter clock */
+ struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
+ unsigned long clk_rate;
+ struct mmc_host *mmc;
+ bool use_14lpp_dll_reset;
+ bool tuning_done;
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
+<<<<<<<
+ struct icc_path *path;
+=======
+ u32 caps_0;
+>>>>>>>
+};
+
+static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct mmc_ios ios = host->mmc->ios;
+ /*
+	 * The SDHC requires the internal clock frequency to be double the
+	 * actual clock that will be set for DDR mode. The controller
+	 * uses the faster clock (100/400MHz) for some of its parts and
+	 * sends the actual required clock (50/200MHz) to the card.
+ */
+ if (ios.timing == MMC_TIMING_UHS_DDR50 ||
+ ios.timing == MMC_TIMING_MMC_DDR52 ||
+ ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ clock *= 2;
+ return clock;
+}
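+
+/*
+ * Example (illustrative): for MMC_TIMING_MMC_DDR52 with a requested card
+ * clock of 52 MHz this helper returns 104 MHz, so the GCC core clock runs at
+ * twice the card rate while the card itself still sees 52 MHz, as described
+ * in the comment above.
+ */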
+
+static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
+ int rc;
+
+ clock = msm_get_clock_rate_for_bus_mode(host, clock);
+ rc = clk_set_rate(core_clk, clock);
+ if (rc) {
+ pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+ mmc_hostname(host->mmc), clock,
+ curr_ios.timing);
+ return;
+ }
+ msm_host->clk_rate = clock;
+ pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
+ curr_ios.timing);
+}
+
+/* Platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+{
+ u32 wait_cnt = 50;
+ u8 ck_out_en;
+ struct mmc_host *mmc = host->mmc;
+
+ /* Poll for CK_OUT_EN bit. max. poll time = 50us */
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+
+ while (ck_out_en != poll) {
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
+ mmc_hostname(mmc), poll);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+ }
+
+ return 0;
+}
+
+static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
+{
+ int rc;
+ static const u8 grey_coded_phase_table[] = {
+ 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+ 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
+ };
+ unsigned long flags;
+ u32 config;
+ struct mmc_host *mmc = host->mmc;
+
+ if (phase > 0xf)
+ return -EINVAL;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
+ config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
+ rc = msm_dll_poll_ck_out_en(host, 0);
+ if (rc)
+ goto err_out;
+
+ /*
+ * Write the selected DLL clock output phase (0 ... 15)
+ * to CDR_SELEXT bit field of DLL_CONFIG register.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CDR_SELEXT_MASK;
+ config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
+ rc = msm_dll_poll_ck_out_en(host, 1);
+ if (rc)
+ goto err_out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CDR_EN;
+ config &= ~CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ goto out;
+
+err_out:
+ dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
+ mmc_hostname(mmc), phase);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return rc;
+}
+
+/*
+ * Find out the greatest range of consecutive selected
+ * DLL clock output phases that can be used as sampling
+ * setting for SD3.0 UHS-I card read operation (in SDR104
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
+ * Select 3/4 of the range and configure the DLL with the
+ * selected DLL clock output phase.
+ */
+
+static int msm_find_most_appropriate_phase(struct sdhci_host *host,
+ u8 *phase_table, u8 total_phases)
+{
+ int ret;
+ u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
+ u8 phases_per_row[MAX_PHASES] = { 0 };
+ int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
+ int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
+ bool phase_0_found = false, phase_15_found = false;
+ struct mmc_host *mmc = host->mmc;
+
+ if (!total_phases || (total_phases > MAX_PHASES)) {
+ dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
+ mmc_hostname(mmc), total_phases);
+ return -EINVAL;
+ }
+
+ for (cnt = 0; cnt < total_phases; cnt++) {
+ ranges[row_index][col_index] = phase_table[cnt];
+ phases_per_row[row_index] += 1;
+ col_index++;
+
+ if ((cnt + 1) == total_phases) {
+ continue;
+ /* check if next phase in phase_table is consecutive or not */
+ } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
+ row_index++;
+ col_index = 0;
+ }
+ }
+
+ if (row_index >= MAX_PHASES)
+ return -EINVAL;
+
+	/* Check if phase-0 is present in the first valid window */
+ if (!ranges[0][0]) {
+ phase_0_found = true;
+ phase_0_raw_index = 0;
+		/* Check if a cycle exists between the 2 valid windows */
+ for (cnt = 1; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt]) {
+ for (i = 0; i < phases_per_row[cnt]; i++) {
+ if (ranges[cnt][i] == 15) {
+ phase_15_found = true;
+ phase_15_raw_index = cnt;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+	/* If 2 valid windows form a cycle, merge them into a single window */
+ if (phase_0_found && phase_15_found) {
+		/* number of phases in the row where phase 0 is present */
+ u8 phases_0 = phases_per_row[phase_0_raw_index];
+		/* number of phases in the row where phase 15 is present */
+ u8 phases_15 = phases_per_row[phase_15_raw_index];
+
+ if (phases_0 + phases_15 >= MAX_PHASES)
+ /*
+			 * If there is more than one phase window, the total
+			 * number of phases in both windows must be less
+			 * than MAX_PHASES.
+ */
+ return -EINVAL;
+
+ /* Merge 2 cyclic windows */
+ i = phases_15;
+ for (cnt = 0; cnt < phases_0; cnt++) {
+ ranges[phase_15_raw_index][i] =
+ ranges[phase_0_raw_index][cnt];
+ if (++i >= MAX_PHASES)
+ break;
+ }
+
+ phases_per_row[phase_0_raw_index] = 0;
+ phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
+ }
+
+ for (cnt = 0; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt] > curr_max) {
+ curr_max = phases_per_row[cnt];
+ selected_row_index = cnt;
+ }
+ }
+
+ i = (curr_max * 3) / 4;
+ if (i)
+ i--;
+
+ ret = ranges[selected_row_index][i];
+
+ if (ret >= MAX_PHASES) {
+ ret = -EINVAL;
+ dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
+ mmc_hostname(mmc), ret);
+ }
+
+ return ret;
+}
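+
+/*
+ * Worked example (illustrative): if tuning succeeds for phases
+ * {0, 1, 2, 13, 14, 15}, the rows built above are {0, 1, 2} and {13, 14, 15}.
+ * Because both phase 0 and phase 15 are present, the two windows wrap around
+ * 15 -> 0 and are merged into the single 6-phase window {13, 14, 15, 0, 1, 2}.
+ * Three quarters of 6 is 4, minus one for the zero-based index gives i = 3,
+ * so phase 0 is returned and later programmed into the DLL.
+ */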
+
+static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
+{
+ u32 mclk_freq = 0, config;
+
+ /* Program the MCLK value to MCLK_FREQ bit field */
+ if (host->clock <= 112000000)
+ mclk_freq = 0;
+ else if (host->clock <= 125000000)
+ mclk_freq = 1;
+ else if (host->clock <= 137000000)
+ mclk_freq = 2;
+ else if (host->clock <= 150000000)
+ mclk_freq = 3;
+ else if (host->clock <= 162000000)
+ mclk_freq = 4;
+ else if (host->clock <= 175000000)
+ mclk_freq = 5;
+ else if (host->clock <= 187000000)
+ mclk_freq = 6;
+ else if (host->clock <= 200000000)
+ mclk_freq = 7;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CMUX_SHIFT_PHASE_MASK;
+ config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+}
+
+/* Initialize the DLL (Programmable Delay Line) */
+static int msm_init_cm_dll(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int wait_cnt = 50;
+ unsigned long flags;
+ u32 config;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * Make sure that clock is always enabled when DLL
+ * tuning is in progress. Keeping PWRSAVE ON may
+ * turn off the clock.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_CLK_PWRSAVE;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ msm_cm_dll_set_freq(host);
+
+ if (msm_host->use_14lpp_dll_reset &&
+ !IS_ERR_OR_NULL(msm_host->xo_clk)) {
+ u32 mclk_freq = 0;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= CORE_FLL_CYCLE_CNT;
+ if (config)
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
+ clk_get_rate(msm_host->xo_clk));
+ else
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
+ clk_get_rate(msm_host->xo_clk));
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~(0xFF << 10);
+ config |= mclk_freq << 10;
+
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ /* wait for 5us before enabling DLL clock */
+ udelay(5);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ msm_cm_dll_set_freq(host);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
+ while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
+ CORE_DLL_LOCK)) {
+		/* max. wait of 50us for the LOCK bit to be set */
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
+ mmc_hostname(mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
+}
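+
+/*
+ * Example of the FLL cycle-count programming above (illustrative; a 19.2 MHz
+ * TCXO rate is assumed, as the actual rate is not shown in this hunk): with
+ * CORE_FLL_CYCLE_CNT clear and host->clock at 200 MHz, the value written to
+ * bits [17:10] of DLL_CONFIG_2 is DIV_ROUND_CLOSEST_ULL(200000000 * 4,
+ * 19200000) = 42.
+ */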
+
+static void msm_hc_select_default(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config;
+
+ if (!msm_host->use_cdclp533) {
+ config = readl_relaxed(host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ config &= ~CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_DFLT;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+ * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+ * in VENDOR_SPEC_FUNC
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_SELECT_IN_EN;
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+static void msm_hc_select_hs400(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios ios = host->mmc->ios;
+ u32 config, dll_lock;
+ int rc;
+
+ /* Select the divided clock (free running MCLK/2) */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_HS400;
+
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if ((msm_host->tuning_done || ios.enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config |= CORE_HC_SELECT_IN_HS400;
+ config |= CORE_HC_SELECT_IN_EN;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+ if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
+ /*
+ * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
+ * CORE_DLL_STATUS to be set. This should get set
+ * within 15 us at 200 MHz.
+ */
+ rc = readl_relaxed_poll_timeout(host->ioaddr +
+ CORE_DLL_STATUS,
+ dll_lock,
+ (dll_lock &
+ (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10,
+ 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc), dll_lock);
+ }
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+/*
+ * sdhci_msm_hc_select_mode :- In general all timing modes are
+ * controlled via UHS mode select in Host Control2 register.
+ * eMMC specific HS200/HS400 don't have their respective modes
+ * defined here, hence we use these values.
+ *
+ * HS200 - SDR104 (Since they both are equivalent in functionality)
+ * HS400 - This involves multiple configurations
+ * Initially SDR104 - when tuning is required as HS200
+ * Then when switching to DDR @ 400MHz (HS400) we use
+ * the vendor specific HC_SELECT_IN to control the mode.
+ *
+ * In addition to controlling the modes we also need to select the
+ * correct input clock for DLL depending on the mode.
+ *
+ * HS400 - divided clock (free running MCLK/2)
+ * All other modes - default (free running MCLK)
+ */
+static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
+{
+ struct mmc_ios ios = host->mmc->ios;
+
+ if (ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ msm_hc_select_hs400(host);
+ else
+ msm_hc_select_default(host);
+}
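+
+/*
+ * Example (illustrative): for MMC_TIMING_MMC_HS200 the mode is programmed
+ * through the standard UHS field as SDHCI_CTRL_UHS_SDR104 (see
+ * sdhci_msm_set_uhs_signaling() below), whereas MMC_TIMING_MMC_HS400 takes
+ * the msm_hc_select_hs400() path, which selects the divided MCLK/2 clock and,
+ * once tuning is done or enhanced strobe is in use, also sets
+ * CORE_HC_SELECT_IN_HS400 | CORE_HC_SELECT_IN_EN in the vendor specific
+ * register.
+ */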
+
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config, calib_done;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_CDC_T4_DLY_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config |= CORE_CDC_SWITCH_RC_EN;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ /* Perform CDC Register Initialization Sequence */
+
+ writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+ writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+ writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+ writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+ writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+ writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+ /* CDC HW Calibration */
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config &= ~CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_HW_AUTOCAL_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ config |= CORE_TIMER_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+ calib_done,
+ (calib_done & CORE_CALIBRATION_DONE),
+ 1, 50);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CDC calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+ & CORE_CDC_ERROR_CODE_MASK;
+ if (ret) {
+ pr_err("%s: %s: CDC error code %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ u32 dll_status, config;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+	 * The CORE_DDR_CONFIG register currently defaults to the desired
+	 * configuration on reset. Reprogram the power-on reset (POR) value
+	 * here anyway, in case it has been modified by a bootloader. In the
+	 * future, if this changes, the desired values will need to be
+	 * programmed appropriately.
+ */
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+
+ if (mmc->ios.enhanced_strobe) {
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_CMDIN_RCLK_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DDR_CAL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+ dll_status,
+ (dll_status & CORE_DDR_DLL_LOCK),
+ 10, 1000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
+ config |= CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);
+
+ /*
+ * Drain writebuffer to ensure above DLL calibration
+ * and PWRSAVE DLL is enabled.
+ */
+ wmb();
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+ u32 config;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ if (!mmc->ios.enhanced_strobe) {
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host,
+ msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ }
+
+ if (msm_host->use_cdclp533)
+ ret = sdhci_msm_cdclp533_calibration(host);
+ else
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int tuning_seq_cnt = 3;
+ u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+ int rc;
+ struct mmc_ios ios = host->mmc->ios;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ /*
+	 * Tuning is required for SDR104, HS200 and HS400 cards, and only
+	 * if the clock frequency is greater than 100MHz in these modes.
+ */
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !(ios.timing == MMC_TIMING_MMC_HS400 ||
+ ios.timing == MMC_TIMING_MMC_HS200 ||
+ ios.timing == MMC_TIMING_UHS_SDR104))
+ return 0;
+
+ /*
+	 * For HS400, tuning in HS200 timing requires:
+ * - select MCLK/2 in VENDOR_SPEC
+ * - program MCLK to 400MHz (or nearest supported) in GCC
+ */
+ if (host->flags & SDHCI_HS400_TUNING) {
+ sdhci_msm_hc_select_mode(host);
+ msm_set_clock_rate_for_bus_mode(host, ios.clock);
+ host->flags &= ~SDHCI_HS400_TUNING;
+ }
+
+retry:
+ /* First of all reset the tuning block */
+ rc = msm_init_cm_dll(host);
+ if (rc)
+ return rc;
+
+ phase = 0;
+ do {
+ /* Set the phase in delay line hw block */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ return rc;
+
+ msm_host->saved_tuning_phase = phase;
+ rc = mmc_send_tuning(mmc, opcode, NULL);
+ if (!rc) {
+ /* Tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+ mmc_hostname(mmc), phase);
+ }
+ } while (++phase < ARRAY_SIZE(tuned_phases));
+
+ if (tuned_phase_cnt) {
+ rc = msm_find_most_appropriate_phase(host, tuned_phases,
+ tuned_phase_cnt);
+ if (rc < 0)
+ return rc;
+ else
+ phase = rc;
+
+ /*
+ * Finally set the selected phase in delay
+ * line hw block.
+ */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ return rc;
+ dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
+ mmc_hostname(mmc), phase);
+ } else {
+ if (--tuning_seq_cnt)
+ goto retry;
+ /* Tuning failed */
+ dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
+ mmc_hostname(mmc));
+ rc = -EIO;
+ }
+
+ if (!rc)
+ msm_host->tuning_done = true;
+ return rc;
+}
+
+/*
+ * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
+ * This needs to be done for both tuning and enhanced_strobe mode.
+ * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
+ * fixed feedback clock is used.
+ */
+static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if (host->clock > CORE_FREQ_100MHZ &&
+ (msm_host->tuning_done || ios->enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ ret = sdhci_msm_hs400_dll_calibration(host);
+ if (!ret)
+ msm_host->calibration_done = true;
+ else
+ pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ }
+}
+
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl_2;
+ u32 config;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ break;
+ }
+
+ /*
+ * When clock frequency is less than 100MHz, the feedback clock must be
+ * provided and DLL must not be used so that tuning can be skipped. To
+ * provide feedback clock, the mode selection can be any value less
+ * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
+ */
+ if (host->clock <= CORE_FREQ_100MHZ) {
+ if (uhs == MMC_TIMING_MMC_HS400 ||
+ uhs == MMC_TIMING_MMC_HS200 ||
+ uhs == MMC_TIMING_UHS_SDR104)
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ /*
+ * DLL is not required for clock <= 100MHz
+		 * Thus, make sure the DLL is disabled when not required
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /*
+ * The DLL needs to be restored and CDCLP533 recalibrated
+ * when the clock frequency is set back to 400MHz.
+ */
+ msm_host->calibration_done = false;
+ }
+
+ dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
+ mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+ if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
+ sdhci_msm_hs400(host, &mmc->ios);
+}
+
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * sdhci_msm_check_power_status API should be called when register writes
+ * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
+ * happen. The state the register write will move the IO lines to should be
+ * passed as the req_type argument. This API checks whether the IO lines are
+ * already in the expected state and waits for the power irq only if a power
+ * irq is expected to be triggered, based on the current and expected IO line
+ * states.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+ u32 val;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The power interrupt will not be generated for signal voltage
+ * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+ */
+ val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+ if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+ !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
+ return;
+ }
+
+ /*
+ * The IRQ for request type IO High/LOW will be generated when -
+ * there is a state change in 1.8V enable bit (bit 3) of
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
+ * which indicates 3.3V IO voltage. So, when MMC core layer tries
+ * to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where register writes will
+ * not change the current bus state or io level of the controller.
+	 * In this case, no power irq will be triggered and we should
+ * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ u32 pwr_state = 0, io_level = 0;
+ u32 config;
+
+ irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+ irq_status &= INT_MASK;
+
+ writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+	 * lost when the actual reset and the clear/read of the status register
+	 * happen at the same time. Hence, retry up to 10 times to make sure
+	 * the status register is cleared. Otherwise, this will result in
+ * a spurious power IRQ resulting in system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+ pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+	/* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+
+ /*
+	 * The driver has to acknowledge the interrupt, switch voltages and
+	 * report back to this register whether it succeeded. The voltage
+ * switches are handled by the sdhci core, so just report success.
+ */
+ writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ /*
+ * If we don't have info regarding the voltage levels supported by
+ * regulators, don't change the IO PAD PWR SWITCH.
+ */
+ if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
+ u32 new_config;
+ /*
+ * We should unset IO PAD PWR switch only if the register write
+ * can set IO lines high and the regulator also switches to 3 V.
+ * Else, we should keep the IO PAD PWR switch set.
+ * This is applicable to certain targets where eMMC vccq supply
+ * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
+ * IO PAD PWR switch must be kept set to reflect actual
+ * regulator voltage. This way, during initialization of
+ * controllers with only 1.8V, we will set the IO PAD bit
+ * without waiting for a REQ_IO_LOW.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ new_config = config;
+
+ if ((io_level & REQ_IO_HIGH) &&
+ (msm_host->caps_0 & CORE_3_0V_SUPPORT))
+ new_config &= ~CORE_IO_PAD_PWR_SWITCH;
+ else if ((io_level & REQ_IO_LOW) ||
+ (msm_host->caps_0 & CORE_1_8V_SUPPORT))
+ new_config |= CORE_IO_PAD_PWR_SWITCH;
+
+ if (config ^ new_config)
+ writel_relaxed(new_config,
+ host->ioaddr + CORE_VENDOR_SPEC);
+ }
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
+}
+
+static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
+
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
+
+ return clk_round_rate(core_clk, ULONG_MAX);
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+ return SDHCI_MSM_MIN_CLOCK;
+}
+
+/**
+ * __sdhci_msm_set_clock - sdhci_msm clock control.
+ *
+ * Description:
+ * MSM controller does not use internal divider and
+ * instead directly control the GCC clock as per
+ * HW recommendation.
+ **/
+static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ /*
+ * Keep actual_clock as zero -
+	 * Keep actual_clock as zero:
+	 * - no divider is used, so there is no need for actual_clock.
+	 * - the MSM controller uses SDCLK for data timeout calculation; if
+	 *   actual_clock is zero, host->clock is used for the calculation.
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ /*
+	 * The MSM controller does not use a clock divider.
+	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
+	 * clock with no divider value programmed.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ sdhci_enable_clk(host, clk);
+}
+
+/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (!clock) {
+ msm_host->clk_rate = clock;
+ goto out;
+ }
+
+ sdhci_msm_hc_select_mode(host);
+
+ msm_set_clock_rate_for_bus_mode(host, clock);
+out:
+ __sdhci_msm_set_clock(host, clock);
+}
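+
+/*
+ * Example flow (illustrative): when the core requests 200 MHz in HS400
+ * timing, sdhci_msm_set_clock() first runs sdhci_msm_hc_select_mode() to pick
+ * the divided MCLK/2 path, msm_set_clock_rate_for_bus_mode() then doubles the
+ * request and sets the GCC core clock to 400 MHz, and finally
+ * __sdhci_msm_set_clock() re-enables SDCLK with no divider programmed.
+ */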
+
+/*
+ * Platform specific register write functions. This is so that, if any
+ * register write needs to be followed up by platform specific actions,
+ * they can be added here. These functions can go to sleep when writes
+ * to certain registers are done.
+ * These functions rely on sdhci_set_ios not using a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+<<<<<<<
+static int sdhci_msm_set_icc(struct sdhci_msm_host *msm_host, unsigned int rate)
+{
+ if (IS_ERR(msm_host->path)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ if (rate == INT_MAX)
+ icc_set(msm_host->path, 2048000, 4096000);
+ else
+ icc_set(msm_host->path, 0, 0);
+
+ return 0;
+}
+
+static void sdhci_msm_deinit_icc(struct sdhci_msm_host *msm_host)
+{
+ if (!IS_ERR(msm_host->path))
+ icc_put(msm_host->path);
+=======
+static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
+{
+ struct mmc_host *mmc = msm_host->mmc;
+ struct regulator *supply = mmc->supply.vqmmc;
+ u32 caps = 0, config;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ if (regulator_is_supported_voltage(supply, 1700000, 1950000))
+ caps |= CORE_1_8V_SUPPORT;
+ if (regulator_is_supported_voltage(supply, 2700000, 3600000))
+ caps |= CORE_3_0V_SUPPORT;
+
+ if (!caps)
+ pr_warn("%s: 1.8/3V not supported for vqmmc\n",
+ mmc_hostname(mmc));
+ }
+
+ if (caps) {
+ /*
+ * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
+ * bit can be used as required later on.
+ */
+ u32 io_level = msm_host->curr_io_level;
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config |= CORE_IO_PAD_PWR_SWITCH_EN;
+
+ if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
+ config &= ~CORE_IO_PAD_PWR_SWITCH;
+ else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
+ config |= CORE_IO_PAD_PWR_SWITCH;
+
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+ msm_host->caps_0 |= caps;
+ pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
+>>>>>>>
+}
+
+static const struct of_device_id sdhci_msm_dt_match[] = {
+ { .compatible = "qcom,sdhci-msm-v4" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
+static const struct sdhci_ops sdhci_msm_ops = {
+ .reset = sdhci_reset,
+ .set_clock = sdhci_msm_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
+};
+
+static const struct sdhci_pltfm_data sdhci_msm_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .ops = &sdhci_msm_ops,
+};
+
+static int sdhci_msm_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_msm_host *msm_host;
+ struct resource *core_memres;
+ struct clk *clk;
+ int ret;
+ u16 host_version, core_minor;
+ u32 core_version, config;
+ u8 core_major;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ host->sdma_boundary = 0;
+ pltfm_host = sdhci_priv(host);
+ msm_host = sdhci_pltfm_priv(pltfm_host);
+ msm_host->mmc = host->mmc;
+ msm_host->pdev = pdev;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto pltfm_free;
+
+ sdhci_get_of_property(pdev);
+
+ msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+ /* Setup SDCC bus voter clock. */
+ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(msm_host->bus_clk)) {
+ /* Vote for max. clk rate for max. performance */
+ ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
+ if (ret)
+ goto pltfm_free;
+ ret = clk_prepare_enable(msm_host->bus_clk);
+ if (ret)
+ goto pltfm_free;
+ }
+
+ /* Setup main peripheral bus clock */
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
+ goto bus_clk_disable;
+ }
+ msm_host->bulk_clks[1].clk = clk;
+
+ /* Setup SDC MMC clock */
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
+ goto bus_clk_disable;
+ }
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
+
+ msm_host->path = of_icc_get(&pdev->dev, "ddr");
+ if (IS_ERR(msm_host->path)) {
+ ret = PTR_ERR(msm_host->path);
+ goto clk_disable;
+ }
+ sdhci_msm_set_icc(msm_host, INT_MAX);
+
+ /*
+ * The xo clock is needed for the FLL feature of cm_dll.
+ * If the xo clock is not specified in DT, warn and proceed.
+ */
+ msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(msm_host->xo_clk)) {
+ ret = PTR_ERR(msm_host->xo_clk);
+ dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
+ }
+
+ core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+
+ if (IS_ERR(msm_host->core_mem)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(msm_host->core_mem);
+ goto clk_disable;
+ }
+
+ /* Reset the vendor spec register to power on reset state */
+ writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+ host->ioaddr + CORE_VENDOR_SPEC);
+
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+
+ config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
+ config |= FF_CLK_SW_RST_DIS;
+ writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);
+
+ host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+ dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
+ host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT));
+
+ core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+ core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
+ CORE_VERSION_MAJOR_SHIFT;
+ core_minor = core_version & CORE_VERSION_MINOR_MASK;
+ dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
+ core_version, core_major, core_minor);
+
+ if (core_major == 1 && core_minor >= 0x42)
+ msm_host->use_14lpp_dll_reset = true;
+
+ /*
+ * SDCC 5 controller with major version 1, minor version 0x34 and later
+ * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
+ */
+ if (core_major == 1 && core_minor < 0x34)
+ msm_host->use_cdclp533 = true;
+
+ /*
+ * Support for some capabilities is not advertised by newer
+ * controller versions and must be explicitly enabled.
+ */
+ if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
+ config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+ config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
+ writel_relaxed(config, host->ioaddr +
+ CORE_VENDOR_SPEC_CAPABILITIES0);
+ }
+
+ /*
+ * Power on reset state may trigger power irq if previous status of
+ * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
+ * interrupt in GIC, any pending power irq interrupt should be
+ * acknowledged. Otherwise power irq interrupt handler would be
+ * fired prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+ * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+
+ /* Setup IRQ for handling power/voltage tasks with PMIC */
+ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
+ msm_host->pwr_irq);
+ ret = msm_host->pwr_irq;
+ goto clk_disable;
+ }
+
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
+ ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+ sdhci_msm_pwr_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
+ goto clk_disable;
+ }
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ MSM_MMC_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+ sdhci_msm_set_regulator_caps(msm_host);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+clk_disable:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+bus_clk_disable:
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_msm_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ sdhci_msm_deinit_icc(msm_host);
+
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+ sdhci_pltfm_free(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+
+ return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+}
+#endif
+
+static const struct dev_pm_ops sdhci_msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
+ sdhci_msm_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver sdhci_msm_driver = {
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "sdhci_msm",
+ .of_match_table = sdhci_msm_dt_match,
+ .pm = &sdhci_msm_pm_ops,
+ },
+};
+
+module_platform_driver(sdhci_msm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/bd9fcd8e64f89f7d4c90ad64ee8bc94d4b3a7b0e/preimage b/rr-cache/bd9fcd8e64f89f7d4c90ad64ee8bc94d4b3a7b0e/preimage
new file mode 100644
index 0000000..deacb6d
--- /dev/null
+++ b/rr-cache/bd9fcd8e64f89f7d4c90ad64ee8bc94d4b3a7b0e/preimage
@@ -0,0 +1,804 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+<<<<<<<
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+=======
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+>>>>>>>
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+<<<<<<<
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+=======
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+>>>>>>>
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+<<<<<<<
+=======
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+>>>>>>>
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+<<<<<<<
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+=======
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+>>>>>>>
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+<<<<<<<
+CONFIG_ATL1C=m
+=======
+CONFIG_ATL1C=y
+>>>>>>>
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+<<<<<<<
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+=======
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+>>>>>>>
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+<<<<<<<
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM845=y
+=======
+CONFIG_PINCTRL_MT7622=y
+>>>>>>>
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+CONFIG_QCOM_CPR=y
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPMH=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+<<<<<<<
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+=======
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_SND_SIMPLE_CARD=y
+>>>>>>>
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_GCC_8998=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+<<<<<<<
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+=======
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+>>>>>>>
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_QCOM_MSM8916=y
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+CONFIG_QTI_RPMH_MBOX=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_QCOM_GENI_SE=y
diff --git a/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/postimage b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/postimage
new file mode 100644
index 0000000..a1d0198
--- /dev/null
+++ b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/postimage
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL 0x00
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define CMD_BME_VAL 0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_DBI_BASE_ADDR 0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL 0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG 0x8BC
+#define DBI_RO_WR_EN 1
+
+#define PERST_DELAY_US 1000
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *phy_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+ struct clk *iface;
+ struct clk *aux;
+ struct clk *master_bus;
+ struct clk *slave_bus;
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_2_4_0 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct reset_control *axi_m_reset;
+ struct reset_control *axi_s_reset;
+ struct reset_control *pipe_reset;
+ struct reset_control *axi_m_vmid_reset;
+ struct reset_control *axi_s_xpu_reset;
+ struct reset_control *parf_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *axi_m_sticky_reset;
+ struct reset_control *pipe_sticky_reset;
+ struct reset_control *pwr_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+ struct clk *iface;
+ struct clk *axi_m_clk;
+ struct clk *axi_s_clk;
+ struct clk *ahb_clk;
+ struct clk *aux_clk;
+ struct reset_control *rst[7];
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*post_deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (dw_pcie_link_up(pci))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface_clk))
+ return PTR_ERR(res->iface_clk);
+
+ res->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(res->core_clk))
+ return PTR_ERR(res->core_clk);
+
+ res->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+ if (IS_ERR(res->axi_reset))
+ return PTR_ERR(res->axi_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->pci_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->core_clk);
+ clk_disable_unprepare(res->phy_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->iface_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_phy;
+ }
+
+ ret = clk_prepare_enable(res->core_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_core;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_deassert_ahb;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val |= BIT(16);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->por_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert por reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->axi_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi reset\n");
+ return ret;
+ }
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+ /* Set the max TLP size to 2K, instead of using the default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+ return 0;
+
+err_deassert_ahb:
+ clk_disable_unprepare(res->core_clk);
+err_clk_core:
+ clk_disable_unprepare(res->phy_clk);
+err_clk_phy:
+ clk_disable_unprepare(res->iface_clk);
+err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->aux = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux))
+ return PTR_ERR(res->aux);
+
+ res->master_bus = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_bus))
+ return PTR_ERR(res->master_bus);
+
+ res->slave_bus = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_bus))
+ return PTR_ERR(res->slave_bus);
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_disable_unprepare(res->slave_bus);
+ clk_disable_unprepare(res->master_bus);
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->aux);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_res;
+ }
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master_bus clock\n");
+ goto err_iface;
+ }
+
+ ret = clk_prepare_enable(res->slave_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+ goto err_master;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_slave;
+ }
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_slave:
+ clk_disable_unprepare(res->slave_bus);
+err_master:
+ clk_disable_unprepare(res->master_bus);
+err_iface:
+ clk_disable_unprepare(res->iface);
+err_aux:
+ clk_disable_unprepare(res->aux);
+err_res:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_aux_clk;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->master_clk = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+ if (IS_ERR(res->axi_m_reset))
+ return PTR_ERR(res->axi_m_reset);
+
+ res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+ if (IS_ERR(res->axi_s_reset))
+ return PTR_ERR(res->axi_s_reset);
+
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+
+ res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_sticky");
+ if (IS_ERR(res->axi_m_sticky_reset))
+ return PTR_ERR(res->axi_m_sticky_reset);
+
+ res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "pipe_sticky");
+ if (IS_ERR(res->pipe_sticky_reset))
+ return PTR_ERR(res->pipe_sticky_reset);
+
+ res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+ if (IS_ERR(res->pwr_reset))
+ return PTR_ERR(res->pwr_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_assert(res->axi_m_reset);
+ reset_control_assert(res->axi_s_reset);
+ reset_control_assert(res->pipe_reset);
+ reset_control_assert(res->pipe_sticky_reset);
+ reset_control_assert(res->phy_reset);
+ reset_control_assert(res->phy_ahb_reset);
+ reset_control_assert(res->axi_m_sticky_reset);
+ reset_control_assert(res->pwr_reset);
+ reset_control_assert(res->ahb_reset);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = reset_control_assert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi slave reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy ahb reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ goto err_rst_phy;
+ }
+
+ ret = reset_control_deassert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe reset\n");
+ goto err_rst_pipe;
+ }
+
+ ret = reset_control_deassert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe sticky reset\n");
+ goto err_rst_pipe_sticky;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master reset\n");
+ goto err_rst_axi_m;
+ }
+
+ ret = reset_control_deassert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master sticky reset\n");
+ goto err_rst_axi_m_sticky;
+ }
+
+ ret = reset_control_deassert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi slave reset\n");
+ goto err_rst_axi_s;
+ }
+
+ ret = reset_control_deassert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert power reset\n");
+ goto err_rst_pwr;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_rst_ahb;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_axi_s;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_clk_axi_s:
+ clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i;
+ const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+ "axi_m_sticky", "sticky",
+ "ahb", "sleep", };
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->axi_m_clk = devm_clk_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_clk))
+ return PTR_ERR(res->axi_m_clk);
+
+ res->axi_s_clk = devm_clk_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_clk))
+ return PTR_ERR(res->axi_s_clk);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+ res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+ if (IS_ERR(res->rst[i]))
+ return PTR_ERR(res->rst[i]);
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->axi_m_clk);
+ clk_disable_unprepare(res->axi_s_clk);
+ clk_disable_unprepare(res->ahb_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_assert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+ return ret;
+ }
+ }
+
+ usleep_range(2000, 2500);
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_deassert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+ ret);
+ return ret;
+ }
+ }
+
+	/*
+	 * There is no way to check whether the resets have completed,
+	 * so wait for some time.
+	 */
+ usleep_range(2000, 2500);
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->axi_m_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable axi master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->axi_s_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ahb clock\n");
+ goto err_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
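+	/* enable bus mastering and allow writes to read-only DBI registers */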
+ writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
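+	/* clear the ASPM support bits advertised in the Link Capabilities register */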
+ val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+ writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+ writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+ PCIE20_DEVICE_CONTROL2_STATUS2);
+
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * The return values are not checked here; the original
+	 * failure is returned in 'ret' anyway.
+	 */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+ pm_runtime_get_sync(pci->dev);
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->ops->post_init) {
+ ret = pcie->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ qcom_ep_reset_deassert(pcie);
+
+ ret = qcom_pcie_establish_link(pcie);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ qcom_ep_reset_assert(pcie);
+ if (pcie->ops->post_deinit)
+ pcie->ops->post_deinit(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->ops->deinit(pcie);
+ pm_runtime_put(pci->dev);
+
+ return ret;
+}
+
+static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ /* the device class is not reported correctly from the register */
+ if (where == PCI_CLASS_REVISION && size == 4) {
+ *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
+ *val &= 0xff; /* keep revision id */
+ *val |= PCI_CLASS_BRIDGE_PCI << 16;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .rd_own_conf = qcom_pcie_rd_own_conf,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .post_deinit = qcom_pcie_post_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->ops = of_device_get_match_data(dev);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ pcie->parf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->parf))
+ return PTR_ERR(pcie->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie->elbi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->elbi))
+ return PTR_ERR(pcie->elbi);
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ ret = pcie->ops->get_resources(pcie);
+ if (ret)
+ return ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &qcom_pcie_dw_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { }
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/preimage b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/preimage
new file mode 100644
index 0000000..328ecf4
--- /dev/null
+++ b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/preimage
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL 0x00
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define CMD_BME_VAL 0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_DBI_BASE_ADDR 0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL 0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG 0x8BC
+#define DBI_RO_WR_EN 1
+
+#define PERST_DELAY_US 1000
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *ref_clk;
+ struct clk *phy_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+ struct clk *iface;
+ struct clk *aux;
+ struct clk *master_bus;
+ struct clk *slave_bus;
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_2_4_0 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct reset_control *axi_m_reset;
+ struct reset_control *axi_s_reset;
+ struct reset_control *pipe_reset;
+ struct reset_control *axi_m_vmid_reset;
+ struct reset_control *axi_s_xpu_reset;
+ struct reset_control *parf_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *axi_m_sticky_reset;
+ struct reset_control *pipe_sticky_reset;
+ struct reset_control *pwr_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+ struct clk *iface;
+ struct clk *axi_m_clk;
+ struct clk *axi_s_clk;
+ struct clk *ahb_clk;
+ struct clk *aux_clk;
+ struct reset_control *rst[7];
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*post_deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (dw_pcie_link_up(pci))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface_clk))
+ return PTR_ERR(res->iface_clk);
+
+ res->ref_clk = devm_clk_get(dev, "ref");
+
+ if (IS_ERR(res->ref_clk)) {
+ if (PTR_ERR(res->ref_clk) == -EPROBE_DEFER)
+ return PTR_ERR(res->ref_clk);
+
+ res->ref_clk = NULL;
+ }
+
+ res->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(res->core_clk))
+ return PTR_ERR(res->core_clk);
+
+ res->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+ if (IS_ERR(res->axi_reset))
+ return PTR_ERR(res->axi_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+	reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->ref_clk);
+ clk_disable_unprepare(res->core_clk);
+ clk_disable_unprepare(res->phy_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->ref_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ref clock\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->iface_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_phy;
+ }
+
+ ret = clk_prepare_enable(res->core_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_core;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_deassert_ahb;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val |= BIT(16);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->por_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert por reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->axi_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi reset\n");
+ return ret;
+ }
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+
+ /* Set the Max TLP size to 2K, instead of using default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+ return 0;
+
+err_deassert_ahb:
+ clk_disable_unprepare(res->core_clk);
+err_clk_core:
+ clk_disable_unprepare(res->phy_clk);
+err_clk_phy:
+ clk_disable_unprepare(res->iface_clk);
+err_clk_iface:
+ clk_disable_unprepare(res->ref_clk);
+err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->aux = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux))
+ return PTR_ERR(res->aux);
+
+ res->master_bus = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_bus))
+ return PTR_ERR(res->master_bus);
+
+ res->slave_bus = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_bus))
+ return PTR_ERR(res->slave_bus);
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_disable_unprepare(res->slave_bus);
+ clk_disable_unprepare(res->master_bus);
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->aux);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_res;
+ }
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master_bus clock\n");
+ goto err_iface;
+ }
+
+ ret = clk_prepare_enable(res->slave_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+ goto err_master;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_slave;
+ }
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_slave:
+ clk_disable_unprepare(res->slave_bus);
+err_master:
+ clk_disable_unprepare(res->master_bus);
+err_iface:
+ clk_disable_unprepare(res->iface);
+err_aux:
+ clk_disable_unprepare(res->aux);
+err_res:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_aux_clk;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->master_clk = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+ if (IS_ERR(res->axi_m_reset))
+ return PTR_ERR(res->axi_m_reset);
+
+ res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+ if (IS_ERR(res->axi_s_reset))
+ return PTR_ERR(res->axi_s_reset);
+
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+
+ res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_sticky");
+ if (IS_ERR(res->axi_m_sticky_reset))
+ return PTR_ERR(res->axi_m_sticky_reset);
+
+ res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "pipe_sticky");
+ if (IS_ERR(res->pipe_sticky_reset))
+ return PTR_ERR(res->pipe_sticky_reset);
+
+ res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+ if (IS_ERR(res->pwr_reset))
+ return PTR_ERR(res->pwr_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_assert(res->axi_m_reset);
+ reset_control_assert(res->axi_s_reset);
+ reset_control_assert(res->pipe_reset);
+ reset_control_assert(res->pipe_sticky_reset);
+ reset_control_assert(res->phy_reset);
+ reset_control_assert(res->phy_ahb_reset);
+ reset_control_assert(res->axi_m_sticky_reset);
+ reset_control_assert(res->pwr_reset);
+ reset_control_assert(res->ahb_reset);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = reset_control_assert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi slave reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy ahb reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ goto err_rst_phy;
+ }
+
+ ret = reset_control_deassert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe reset\n");
+ goto err_rst_pipe;
+ }
+
+ ret = reset_control_deassert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe sticky reset\n");
+ goto err_rst_pipe_sticky;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master reset\n");
+ goto err_rst_axi_m;
+ }
+
+ ret = reset_control_deassert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master sticky reset\n");
+ goto err_rst_axi_m_sticky;
+ }
+
+ ret = reset_control_deassert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi slave reset\n");
+ goto err_rst_axi_s;
+ }
+
+ ret = reset_control_deassert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert power reset\n");
+ goto err_rst_pwr;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_rst_ahb;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_clk_axi_s:
+ clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i;
+ const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+ "axi_m_sticky", "sticky",
+ "ahb", "sleep", };
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->axi_m_clk = devm_clk_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_clk))
+ return PTR_ERR(res->axi_m_clk);
+
+ res->axi_s_clk = devm_clk_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_clk))
+ return PTR_ERR(res->axi_s_clk);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+ res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+ if (IS_ERR(res->rst[i]))
+ return PTR_ERR(res->rst[i]);
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->axi_m_clk);
+ clk_disable_unprepare(res->axi_s_clk);
+ clk_disable_unprepare(res->ahb_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_assert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+ return ret;
+ }
+ }
+
+ usleep_range(2000, 2500);
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_deassert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+ ret);
+ return ret;
+ }
+ }
+
+	/*
+	 * There is no way to check whether the resets have completed,
+	 * so wait for some time.
+	 */
+ usleep_range(2000, 2500);
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->axi_m_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable axi master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->axi_s_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ahb clock\n");
+ goto err_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
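+	/* enable bus mastering and allow writes to read-only DBI registers */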
+ writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
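+	/* clear the ASPM support bits advertised in the Link Capabilities register */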
+ val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+ writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+ writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+ PCIE20_DEVICE_CONTROL2_STATUS2);
+
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * The return values are not checked here; the original
+	 * failure is returned in 'ret' anyway.
+	 */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+<<<<<<<
+ pm_runtime_get_sync(pci->dev);
+=======
+
+ pm_runtime_get_sync(pci->dev);
+
+>>>>>>>
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->ops->post_init) {
+ ret = pcie->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ qcom_ep_reset_deassert(pcie);
+
+ ret = qcom_pcie_establish_link(pcie);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ qcom_ep_reset_assert(pcie);
+ if (pcie->ops->post_deinit)
+ pcie->ops->post_deinit(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->ops->deinit(pcie);
+<<<<<<<
+ pm_runtime_put(pci->dev);
+=======
+ pm_runtime_put_sync(pci->dev);
+>>>>>>>
+
+ return ret;
+}
+
+static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ /* the device class is not reported correctly from the register */
+ if (where == PCI_CLASS_REVISION && size == 4) {
+ *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
+ *val &= 0xff; /* keep revision id */
+ *val |= PCI_CLASS_BRIDGE_PCI << 16;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .rd_own_conf = qcom_pcie_rd_own_conf,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .post_deinit = qcom_pcie_post_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->ops = of_device_get_match_data(dev);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ pcie->parf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->parf))
+ return PTR_ERR(pcie->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie->elbi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->elbi))
+ return PTR_ERR(pcie->elbi);
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ ret = pcie->ops->get_resources(pcie);
+ if (ret)
+ return ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &qcom_pcie_dw_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { }
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/thisimage b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/thisimage
new file mode 100644
index 0000000..328ecf4
--- /dev/null
+++ b/rr-cache/c15c8898132c15bbe8de0f76d799b1e6eedea5f1/thisimage
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL 0x00
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define CMD_BME_VAL 0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_DBI_BASE_ADDR 0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL 0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG 0x8BC
+#define DBI_RO_WR_EN 1
+
+#define PERST_DELAY_US 1000
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *ref_clk;
+ struct clk *phy_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+ struct clk *iface;
+ struct clk *aux;
+ struct clk *master_bus;
+ struct clk *slave_bus;
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_2_4_0 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct reset_control *axi_m_reset;
+ struct reset_control *axi_s_reset;
+ struct reset_control *pipe_reset;
+ struct reset_control *axi_m_vmid_reset;
+ struct reset_control *axi_s_xpu_reset;
+ struct reset_control *parf_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *axi_m_sticky_reset;
+ struct reset_control *pipe_sticky_reset;
+ struct reset_control *pwr_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+ struct clk *iface;
+ struct clk *axi_m_clk;
+ struct clk *axi_s_clk;
+ struct clk *ahb_clk;
+ struct clk *aux_clk;
+ struct reset_control *rst[7];
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*post_deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (dw_pcie_link_up(pci))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface_clk))
+ return PTR_ERR(res->iface_clk);
+
+ res->ref_clk = devm_clk_get(dev, "ref");
+
+ if (IS_ERR(res->ref_clk)) {
+ if (PTR_ERR(res->ref_clk) == -EPROBE_DEFER)
+ return PTR_ERR(res->ref_clk);
+
+ res->ref_clk = NULL;
+ }
+
+ res->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(res->core_clk))
+ return PTR_ERR(res->core_clk);
+
+ res->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+ if (IS_ERR(res->axi_reset))
+ return PTR_ERR(res->axi_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+	reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->ref_clk);
+ clk_disable_unprepare(res->core_clk);
+ clk_disable_unprepare(res->phy_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->ref_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ref clock\n");
+ goto err_assert_ahb;
+ }
+
+ ret = clk_prepare_enable(res->iface_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_phy;
+ }
+
+ ret = clk_prepare_enable(res->core_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_core;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_deassert_ahb;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val |= BIT(16);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->por_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert por reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->axi_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi reset\n");
+ return ret;
+ }
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+
+ /* Set the Max TLP size to 2K, instead of using default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+ return 0;
+
+err_deassert_ahb:
+ clk_disable_unprepare(res->core_clk);
+err_clk_core:
+ clk_disable_unprepare(res->phy_clk);
+err_clk_phy:
+ clk_disable_unprepare(res->iface_clk);
+err_clk_iface:
+ clk_disable_unprepare(res->ref_clk);
+err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->aux = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux))
+ return PTR_ERR(res->aux);
+
+ res->master_bus = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_bus))
+ return PTR_ERR(res->master_bus);
+
+ res->slave_bus = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_bus))
+ return PTR_ERR(res->slave_bus);
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_disable_unprepare(res->slave_bus);
+ clk_disable_unprepare(res->master_bus);
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->aux);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_res;
+ }
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master_bus clock\n");
+ goto err_iface;
+ }
+
+ ret = clk_prepare_enable(res->slave_bus);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+ goto err_master;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_slave;
+ }
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_slave:
+ clk_disable_unprepare(res->slave_bus);
+err_master:
+ clk_disable_unprepare(res->master_bus);
+err_iface:
+ clk_disable_unprepare(res->iface);
+err_aux:
+ clk_disable_unprepare(res->aux);
+err_res:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_aux_clk;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->master_clk = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+ if (IS_ERR(res->axi_m_reset))
+ return PTR_ERR(res->axi_m_reset);
+
+ res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+ if (IS_ERR(res->axi_s_reset))
+ return PTR_ERR(res->axi_s_reset);
+
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+
+ res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_sticky");
+ if (IS_ERR(res->axi_m_sticky_reset))
+ return PTR_ERR(res->axi_m_sticky_reset);
+
+ res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "pipe_sticky");
+ if (IS_ERR(res->pipe_sticky_reset))
+ return PTR_ERR(res->pipe_sticky_reset);
+
+ res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+ if (IS_ERR(res->pwr_reset))
+ return PTR_ERR(res->pwr_reset);
+
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_assert(res->axi_m_reset);
+ reset_control_assert(res->axi_s_reset);
+ reset_control_assert(res->pipe_reset);
+ reset_control_assert(res->pipe_sticky_reset);
+ reset_control_assert(res->phy_reset);
+ reset_control_assert(res->phy_ahb_reset);
+ reset_control_assert(res->axi_m_sticky_reset);
+ reset_control_assert(res->pwr_reset);
+ reset_control_assert(res->ahb_reset);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = reset_control_assert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi slave reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy ahb reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ goto err_rst_phy;
+ }
+
+ ret = reset_control_deassert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe reset\n");
+ goto err_rst_pipe;
+ }
+
+ ret = reset_control_deassert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe sticky reset\n");
+ goto err_rst_pipe_sticky;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master reset\n");
+ goto err_rst_axi_m;
+ }
+
+ ret = reset_control_deassert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master sticky reset\n");
+ goto err_rst_axi_m_sticky;
+ }
+
+ ret = reset_control_deassert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi slave reset\n");
+ goto err_rst_axi_s;
+ }
+
+ ret = reset_control_deassert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert power reset\n");
+ goto err_rst_pwr;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_rst_ahb;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_clk_axi_s:
+ clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i;
+ const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+ "axi_m_sticky", "sticky",
+ "ahb", "sleep", };
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->axi_m_clk = devm_clk_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_clk))
+ return PTR_ERR(res->axi_m_clk);
+
+ res->axi_s_clk = devm_clk_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_clk))
+ return PTR_ERR(res->axi_s_clk);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+ res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+ if (IS_ERR(res->rst[i]))
+ return PTR_ERR(res->rst[i]);
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->axi_m_clk);
+ clk_disable_unprepare(res->axi_s_clk);
+ clk_disable_unprepare(res->ahb_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_assert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+ return ret;
+ }
+ }
+
+ usleep_range(2000, 2500);
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_deassert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Don't have a way to see if the reset has completed.
+ * Wait for some time.
+ */
+ usleep_range(2000, 2500);
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->axi_m_clk);
+ if (ret) {
+		dev_err(dev, "cannot prepare/enable axi master clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->axi_s_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ahb clock\n");
+ goto err_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+ val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+ writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+ writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+ PCIE20_DEVICE_CONTROL2_STATUS2);
+
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+ /*
+ * Not checking for failure, will anyway return
+ * the original failure in 'ret'.
+ */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+<<<<<<<
+ pm_runtime_get_sync(pci->dev);
+=======
+
+ pm_runtime_get_sync(pci->dev);
+
+>>>>>>>
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->ops->post_init) {
+ ret = pcie->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ qcom_ep_reset_deassert(pcie);
+
+ ret = qcom_pcie_establish_link(pcie);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ qcom_ep_reset_assert(pcie);
+ if (pcie->ops->post_deinit)
+ pcie->ops->post_deinit(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->ops->deinit(pcie);
+<<<<<<<
+ pm_runtime_put(pci->dev);
+=======
+ pm_runtime_put_sync(pci->dev);
+>>>>>>>
+
+ return ret;
+}
+
+static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ /* the device class is not reported correctly from the register */
+ if (where == PCI_CLASS_REVISION && size == 4) {
+ *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
+ *val &= 0xff; /* keep revision id */
+ *val |= PCI_CLASS_BRIDGE_PCI << 16;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .rd_own_conf = qcom_pcie_rd_own_conf,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .post_deinit = qcom_pcie_post_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->ops = of_device_get_match_data(dev);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ pcie->parf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->parf))
+ return PTR_ERR(pcie->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie->elbi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->elbi))
+ return PTR_ERR(pcie->elbi);
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ ret = pcie->ops->get_resources(pcie);
+ if (ret)
+ return ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &qcom_pcie_dw_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { }
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/rr-cache/c1e82d583f89c73ff0394c90cc67545bc41ca1e1/preimage b/rr-cache/c1e82d583f89c73ff0394c90cc67545bc41ca1e1/preimage
new file mode 100644
index 0000000..8399dde
--- /dev/null
+++ b/rr-cache/c1e82d583f89c73ff0394c90cc67545bc41ca1e1/preimage
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+
+ };
+
+ soc {
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+ serial@75b1000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+		spi@75ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+ agnoc@0 {
+ qcom,pcie@600000 {
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ };
+
+ qcom,pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ qcom,pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+<<<<<<<
+=======
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+ };
+ };
+>>>>>>>
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ adsp-pil {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ smd-edge {
+ apr {
+ iommus = <&lpass_q6_smmu 1>;
+ audio {
+ compatible = "qcom,apq8096-sndcard";
+ qcom,model = "DB820c";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK";
+
+ fe@1 {
+ is-fe;
+ link-name = "MultiMedia1 Playback";
+ cpu {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ platform {
+ sound-dai = <&q6asm MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ be@1 {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afe AFE_PORT_HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6adm>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage
new file mode 100644
index 0000000..383ceb3
--- /dev/null
+++ b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += common.o
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+
+# Keep alphabetically sorted by config
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_APCC_8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+<<<<<<<
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+=======
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+>>>>>>>
+obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.1 b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.1
new file mode 100644
index 0000000..383ceb3
--- /dev/null
+++ b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.1
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += common.o
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+
+# Keep alphabetically sorted by config
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_APCC_8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+<<<<<<<
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+=======
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+>>>>>>>
+obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.2 b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.2
new file mode 100644
index 0000000..df41b00
--- /dev/null
+++ b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.2
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += common.o
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+
+# Keep alphabetically sorted by config
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+<<<<<<<
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+=======
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+>>>>>>>
+obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.3 b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.3
new file mode 100644
index 0000000..2b664fc
--- /dev/null
+++ b/rr-cache/c5977ed54f42f1f89a5a8eaa7cd413fc62def31b/preimage.3
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += common.o
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+
+# Keep alphabetically sorted by config
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_APCC_8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+<<<<<<<
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+=======
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+>>>>>>>
+obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/postimage b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/postimage
new file mode 100644
index 0000000..01920ee
--- /dev/null
+++ b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/postimage
@@ -0,0 +1,712 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=m
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_QCOM_CPR=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/preimage b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/preimage
new file mode 100644
index 0000000..119d38e
--- /dev/null
+++ b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/preimage
@@ -0,0 +1,715 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=m
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+CONFIG_QCOM_CPR=y
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/thisimage b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/thisimage
new file mode 100644
index 0000000..119d38e
--- /dev/null
+++ b/rr-cache/c8ab4f7b2ace1df1c0cf9f2a387abd7512dbd5e2/thisimage
@@ -0,0 +1,715 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_HISILICON_LPC=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=m
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+<<<<<<<
+CONFIG_QCOM_CPR=y
+=======
+CONFIG_ROCKCHIP_IODOMAIN=y
+>>>>>>>
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/postimage b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/postimage
new file mode 100644
index 0000000..5308523
--- /dev/null
+++ b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/postimage
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <uapi/sound/asound.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "q6asm.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ASM_STREAM_CMD_CLOSE 0x00010BCD
+#define ASM_STREAM_CMD_FLUSH 0x00010BCE
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_DATA_CMD_EOS 0x00010BDB
+#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
+#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+#define ASM_STREAM_POSTPROC_TOPO_ID_NONE 0x00010C68
+#define ASM_CMD_SHARED_MEM_MAP_REGIONS 0x00010D92
+#define ASM_CMDRSP_SHARED_MEM_MAP_REGIONS 0x00010D93
+#define ASM_CMD_SHARED_MEM_UNMAP_REGIONS 0x00010D94
+#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
+#define ASM_DATA_EVENT_WRITE_DONE_V2 0x00010D99
+#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
+#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
+#define ASM_DATA_CMD_READ_V2 0x00010DAC
+#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
+#define ASM_STREAM_CMD_OPEN_WRITE_V3 0x00010DB3
+#define ASM_STREAM_CMD_OPEN_READ_V3 0x00010DB4
+#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
+#define ASM_STREAM_CMD_OPEN_READWRITE_V2 0x00010D8D
+
+
+#define ASM_LEGACY_STREAM_SESSION 0
+/* Bit shift for the stream_perf_mode subfield. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ 29
+#define ASM_END_POINT_DEVICE_MATRIX 0
+#define ASM_DEFAULT_APP_TYPE 0
+#define ASM_SYNC_IO_MODE 0x0001
+#define ASM_ASYNC_IO_MODE 0x0002
+#define ASM_TUN_READ_IO_MODE 0x0004 /* tunnel read write mode */
+#define ASM_TUN_WRITE_IO_MODE 0x0008 /* tunnel read write mode */
+#define ASM_SHIFT_GAPLESS_MODE_FLAG 31
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
+
+struct avs_cmd_shared_mem_map_regions {
+ u16 mem_pool_id;
+ u16 num_regions;
+ u32 property_flag;
+} __packed;
+
+struct avs_shared_map_region_payload {
+ u32 shm_addr_lsw;
+ u32 shm_addr_msw;
+ u32 mem_size_bytes;
+} __packed;
+
+struct avs_cmd_shared_mem_unmap_regions {
+ u32 mem_map_handle;
+} __packed;
+
+struct asm_data_cmd_media_fmt_update_v2 {
+ u32 fmt_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 num_channels;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 reserved;
+ u8 channel_mapping[PCM_MAX_NUM_CHANNEL];
+} __packed;
+
+struct asm_stream_cmd_set_encdec_param {
+ u32 param_id;
+ u32 param_size;
+} __packed;
+
+struct asm_enc_cfg_blk_param_v2 {
+ u32 frames_per_buf;
+ u32 enc_cfg_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_enc_cfg_v2 {
+ struct asm_stream_cmd_set_encdec_param encdec;
+ struct asm_enc_cfg_blk_param_v2 encblk;
+ uint16_t num_channels;
+ uint16_t bits_per_sample;
+ uint32_t sample_rate;
+ uint16_t is_signed;
+ uint16_t reserved;
+ uint8_t channel_mapping[8];
+} __packed;
+
+struct asm_data_cmd_read_v2 {
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+} __packed;
+
+struct asm_data_cmd_read_v2_done {
+ u32 status;
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+};
+
+struct asm_stream_cmd_open_read_v3 {
+ u32 mode_flags;
+ u32 src_endpointype;
+ u32 preprocopo_id;
+ u32 enc_cfg_id;
+ u16 bits_per_sample;
+ u16 reserved;
+} __packed;
+
+struct asm_data_cmd_write_v2 {
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+ u32 timestamp_lsw;
+ u32 timestamp_msw;
+ u32 flags;
+} __packed;
+
+struct asm_stream_cmd_open_write_v3 {
+ uint32_t mode_flags;
+ uint16_t sink_endpointype;
+ uint16_t bits_per_sample;
+ uint32_t postprocopo_id;
+ uint32_t dec_fmt_id;
+} __packed;
+
+struct asm_session_cmd_run_v2 {
+ u32 flags;
+ u32 time_lsw;
+ u32 time_msw;
+} __packed;
+
+struct audio_buffer {
+ phys_addr_t phys;
+ uint32_t size; /* size of buffer */
+};
+
+struct audio_port_data {
+ struct audio_buffer *buf;
+ uint32_t num_periods;
+ uint32_t dsp_buf;
+ uint32_t mem_map_handle;
+};
+
+struct q6asm {
+ struct apr_device *adev;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ wait_queue_head_t mem_wait;
+ struct platform_device *pcmdev;
+ spinlock_t slock;
+ struct audio_client *session[MAX_SESSIONS + 1];
+ struct platform_device *pdev_dais;
+};
+
+struct audio_client {
+ int session;
+ q6asm_cb cb;
+ void *priv;
+ uint32_t io_mode;
+ struct apr_device *adev;
+ struct mutex cmd_lock;
+ spinlock_t lock;
+ struct kref refcount;
+ /* idx:1 out port, 0: in port */
+ struct audio_port_data port[2];
+ wait_queue_head_t cmd_wait;
+ struct aprv2_ibasic_rsp_result_t result;
+ int perf_mode;
+ int stream_id;
+ struct q6asm *q6asm;
+ struct device *dev;
+};
+
+static inline void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, bool cmd_flg,
+ uint32_t stream_id)
+{
+ hdr->hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ hdr->src_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->pkt_size = pkt_size;
+ if (cmd_flg)
+ hdr->token = ac->session;
+}
+
+static int q6asm_apr_send_session_pkt(struct q6asm *a, struct audio_client *ac,
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+ rc = apr_send_pkt(a->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ if (rsp_opcode)
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode) ||
+ (ac->result.opcode == rsp_opcode),
+ 5 * HZ);
+ else
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode),
+ 5 * HZ);
+
+ if (!rc) {
+ dev_err(a->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ } else if (ac->result.status > 0) {
+ dev_err(a->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+static int __q6asm_memory_unmap(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct avs_cmd_shared_mem_unmap_regions *mem_unmap;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct apr_pkt *pkt;
+ int rc, pkt_size;
+ void *p;
+
+ if (ac->port[dir].mem_map_handle == 0) {
+ dev_err(ac->dev, "invalid mem handle\n");
+ return -EINVAL;
+ }
+
+ pkt_size = APR_HDR_SIZE + sizeof(*mem_unmap);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ mem_unmap = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mem_unmap->mem_map_handle = ac->port[dir].mem_map_handle;
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt, 0);
+ if (rc < 0) {
+ kfree(pkt);
+ return rc;
+ }
+
+ ac->port[dir].mem_map_handle = 0;
+
+ kfree(pkt);
+ return 0;
+}
+
+
+static void q6asm_audio_client_free_buf(struct audio_client *ac,
+ struct audio_port_data *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port->num_periods = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+ spin_unlock_irqrestore(&ac->lock, flags);
+}
+
+/**
+ * q6asm_unmap_memory_regions() - unmap memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac)
+{
+ struct audio_port_data *port;
+ int cnt = 0;
+ int rc = 0;
+
+ port = &ac->port[dir];
+ if (!port->buf) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ cnt = port->num_periods - 1;
+ if (cnt >= 0) {
+ rc = __q6asm_memory_unmap(ac, port->buf[dir].phys, dir);
+ if (rc < 0) {
+ dev_err(ac->dev, "%s: Memory_unmap_regions failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+ }
+
+ q6asm_audio_client_free_buf(ac, port);
+
+err:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_unmap_memory_regions);
+
+static int __q6asm_memory_map_regions(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct avs_cmd_shared_mem_map_regions *cmd = NULL;
+ struct avs_shared_map_region_payload *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ struct apr_pkt *pkt;
+ void *p;
+ unsigned long flags;
+ uint32_t num_regions, buf_sz;
+ int rc, i, pkt_size;
+
+ if (is_contiguous) {
+ num_regions = 1;
+ buf_sz = period_sz * periods;
+ } else {
+ buf_sz = period_sz;
+ num_regions = periods;
+ }
+
+ /* DSP expects size should be aligned to 4K */
+ buf_sz = ALIGN(buf_sz, 4096);
+
+ pkt_size = APR_HDR_SIZE + sizeof(*cmd) +
+ (sizeof(*mregions) * num_regions);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ cmd = p + APR_HDR_SIZE;
+ mregions = p + APR_HDR_SIZE + sizeof(*cmd);
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+
+ cmd->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ cmd->num_regions = num_regions;
+ cmd->property_flag = 0x00;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[dir];
+
+ for (i = 0; i < num_regions; i++) {
+ ab = &port->buf[i];
+ mregions->shm_addr_lsw = lower_32_bits(ab->phys);
+ mregions->shm_addr_msw = upper_32_bits(ab->phys);
+ mregions->mem_size_bytes = buf_sz;
+ ++mregions;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt,
+ ASM_CMDRSP_SHARED_MEM_MAP_REGIONS);
+
+ kfree(pkt);
+
+ return rc;
+}
+
+/**
+ * q6asm_map_memory_regions() - map memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ * @phys: physical address that needs mapping.
+ * @period_sz: audio period size
+ * @periods: number of periods
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_map_memory_regions(unsigned int dir, struct audio_client *ac,
+ phys_addr_t phys,
+ size_t period_sz, unsigned int periods)
+{
+ struct audio_buffer *buf;
+ unsigned long flags;
+ int cnt;
+ int rc;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ if (ac->port[dir].buf) {
+ dev_err(ac->dev, "Buffer already allocated\n");
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return 0;
+ }
+
+ buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_ATOMIC);
+ if (!buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return -ENOMEM;
+ }
+
+
+ ac->port[dir].buf = buf;
+
+ buf[0].phys = phys;
+ buf[0].size = period_sz;
+
+ for (cnt = 1; cnt < periods; cnt++) {
+ if (period_sz > 0) {
+ buf[cnt].phys = buf[0].phys + (cnt * period_sz);
+ buf[cnt].size = period_sz;
+ }
+ }
+ ac->port[dir].num_periods = periods;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = __q6asm_memory_map_regions(ac, dir, period_sz, periods, 1);
+ if (rc < 0) {
+ dev_err(ac->dev, "Memory_map_regions failed\n");
+ q6asm_audio_client_free_buf(ac, &ac->port[dir]);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_map_memory_regions);
+
+static void q6asm_audio_client_release(struct kref *ref)
+{
+ struct audio_client *ac;
+ struct q6asm *a;
+ unsigned long flags;
+
+ ac = container_of(ref, struct audio_client, refcount);
+ a = ac->q6asm;
+
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[ac->session] = NULL;
+ spin_unlock_irqrestore(&a->slock, flags);
+
+ kfree(ac);
+}
+
+/**
+ * q6asm_audio_client_free() - Free allocated audio client
+ *
+ * @ac: audio client to free
+ */
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_free);
+
+static struct audio_client *q6asm_get_audio_client(struct q6asm *a,
+ int session_id)
+{
+ struct audio_client *ac = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->slock, flags);
+ if ((session_id <= 0) || (session_id > MAX_SESSIONS)) {
+ dev_err(a->dev, "invalid session: %d\n", session_id);
+ goto err;
+ }
+
+ /* check for valid session */
+ if (!a->session[session_id])
+ goto err;
+ else if (a->session[session_id]->session != session_id)
+ goto err;
+
+ ac = a->session[session_id];
+ kref_get(&ac->refcount);
+err:
+ spin_unlock_irqrestore(&a->slock, flags);
+ return ac;
+}
+
+static int32_t q6asm_stream_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data,
+ int session_id)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct apr_hdr *hdr = &data->hdr;
+ struct audio_port_data *port;
+ struct audio_client *ac;
+ uint32_t client_event = 0;
+ int ret = 0;
+
+ ac = q6asm_get_audio_client(q6asm, session_id);
+ if (!ac)/* Audio client might already be freed by now */
+ return 0;
+
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ switch (result->opcode) {
+ case ASM_SESSION_CMD_PAUSE:
+ client_event = ASM_CLIENT_EVENT_CMD_PAUSE_DONE;
+ break;
+ case ASM_SESSION_CMD_SUSPEND:
+ client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
+ break;
+ case ASM_DATA_CMD_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+ break;
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+ case ASM_SESSION_CMD_RUN_V2:
+ client_event = ASM_CLIENT_EVENT_CMD_RUN_DONE;
+ break;
+ case ASM_STREAM_CMD_CLOSE:
+ client_event = ASM_CLIENT_EVENT_CMD_CLOSE_DONE;
+ break;
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ client_event = ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE;
+ break;
+ case ASM_STREAM_CMD_OPEN_WRITE_V3:
+ case ASM_STREAM_CMD_OPEN_READ_V3:
+ case ASM_STREAM_CMD_OPEN_READWRITE_V2:
+ case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+ case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+ if (result->status != 0) {
+ dev_err(ac->dev,
+ "cmd = 0x%x returned error = 0x%x\n",
+ result->opcode, result->status);
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+ ret = 0;
+ goto done;
+ }
+ break;
+ default:
+ dev_err(ac->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+
+ if (ac->cb)
+ ac->cb(client_event, hdr->token,
+ data->payload, ac->priv);
+
+ ret = 0;
+ goto done;
+
+ case ASM_DATA_EVENT_WRITE_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+
+ if (lower_32_bits(phys) != result->opcode ||
+ upper_32_bits(phys) != result->status) {
+ dev_err(ac->dev, "Expected addr %pa\n",
+ &port->buf[hdr->token].phys);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+ break;
+ case ASM_DATA_EVENT_READ_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_READ_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ struct asm_data_cmd_read_v2_done *done = data->payload;
+ unsigned long flags;
+ phys_addr_t phys;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+
+ if (upper_32_bits(phys) != done->buf_addr_msw ||
+ lower_32_bits(phys) != done->buf_addr_lsw) {
+ dev_err(ac->dev, "Expected addr %pa %08x-%08x\n",
+ &port->buf[hdr->token].phys,
+ done->buf_addr_lsw,
+ done->buf_addr_msw);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+
+ break;
+ }
+
+ if (ac->cb)
+ ac->cb(client_event, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+ return ret;
+}
+
+static int q6asm_srvc_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct audio_port_data *port;
+ struct audio_client *ac = NULL;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6asm *a;
+ uint32_t sid = 0;
+ uint32_t dir = 0;
+ int session_id;
+
+ session_id = (hdr->dest_port >> 8) & 0xFF;
+ if (session_id)
+ return q6asm_stream_callback(adev, data, session_id);
+
+ sid = (hdr->token >> 8) & 0x0F;
+ ac = q6asm_get_audio_client(q6asm, sid);
+ if (!ac) {
+ dev_err(&adev->dev, "Audio Client not active\n");
+ return 0;
+ }
+
+ a = dev_get_drvdata(ac->dev->parent);
+ dir = (hdr->token & 0x0F);
+ port = &ac->port[dir];
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ switch (result->opcode) {
+ case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result = *result;
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_err(&adev->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+ goto done;
+ case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+ ac->result.status = 0;
+ ac->result.opcode = hdr->opcode;
+ port->mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result.opcode = hdr->opcode;
+ ac->result.status = 0;
+ port->mem_map_handle = 0;
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_dbg(&adev->dev, "command[0x%x]success [0x%x]\n",
+ result->opcode, result->status);
+ break;
+ }
+
+ if (ac->cb)
+ ac->cb(hdr->opcode, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+
+ return 0;
+}
+
+/**
+ * q6asm_get_session_id() - get session id for audio client
+ *
+ * @c: audio client pointer
+ *
+ * Return: Will be the session id of the audio client.
+ */
+int q6asm_get_session_id(struct audio_client *c)
+{
+ return c->session;
+}
+EXPORT_SYMBOL_GPL(q6asm_get_session_id);
+
+/**
+ * q6asm_audio_client_alloc() - Allocate a new audio client
+ *
+ * @dev: Pointer to asm child device.
+ * @cb: event callback.
+ * @priv: private data associated with this client.
+ * @stream_id: stream id
+ * @perf_mode: performance mode for this client
+ *
+ * Return: Will be an error pointer on error or a valid audio client
+ * on success.
+ */
+struct audio_client *q6asm_audio_client_alloc(struct device *dev, q6asm_cb cb,
+ void *priv, int stream_id,
+ int perf_mode)
+{
+ struct q6asm *a = dev_get_drvdata(dev->parent);
+ struct audio_client *ac;
+ unsigned long flags;
+
+ ac = q6asm_get_audio_client(a, stream_id + 1);
+ if (ac) {
+ dev_err(dev, "Audio Client already active\n");
+ return ac;
+ }
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[stream_id + 1] = ac;
+ spin_unlock_irqrestore(&a->slock, flags);
+ ac->session = stream_id + 1;
+ ac->cb = cb;
+ ac->dev = dev;
+ ac->q6asm = a;
+ ac->priv = priv;
+ ac->io_mode = ASM_SYNC_IO_MODE;
+ ac->perf_mode = perf_mode;
+ /* DSP expects stream id from 1 */
+ ac->stream_id = 1;
+ ac->adev = a->adev;
+ kref_init(&ac->refcount);
+
+ init_waitqueue_head(&ac->cmd_wait);
+ mutex_init(&ac->cmd_lock);
+ spin_lock_init(&ac->lock);
+
+ return ac;
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_alloc);
+
+static int q6asm_ac_send_cmd_sync(struct audio_client *ac, struct apr_pkt *pkt)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (ac->result.opcode == hdr->opcode), 5 * HZ);
+ if (!rc) {
+ dev_err(ac->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (ac->result.status > 0) {
+ dev_err(ac->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ }
+
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+/**
+ * q6asm_open_write() - Open audio client for writing
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v3 *open;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+ open->mode_flags = 0x00;
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION;
+
+ /* source endpoint : matrix */
+ open->sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+ open->bits_per_sample = bits_per_sample;
+ open->postprocopo_id = ASM_NULL_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ if (rc < 0)
+ goto err;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_open_write);
+
+static int __q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_session_cmd_run_v2 *run;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*run);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ run = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+ run->flags = flags;
+ run->time_lsw = lsw_ts;
+ run->time_msw = msw_ts;
+ if (wait) {
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ } else {
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ }
+
+ kfree(pkt);
+ return rc;
+}
+
+/**
+ * q6asm_run() - start the audio client
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, true);
+}
+EXPORT_SYMBOL_GPL(q6asm_run);
+
+/**
+ * q6asm_run_nowait() - start the audio client without blocking
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_run_nowait);
+
+/**
+ * q6asm_media_format_block_multi_ch_pcm() - setup pcm configuration
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @channel_map: channel map pointer
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->num_channels = channels;
+ fmt->bits_per_sample = bits_per_sample;
+ fmt->sample_rate = rate;
+ fmt->is_signed = 1;
+
+ channel_mapping = fmt->channel_mapping;
+
+ if (channel_map) {
+ memcpy(channel_mapping, channel_map, PCM_MAX_NUM_CHANNEL);
+ } else {
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ dev_err(ac->dev, " map channels failed %d\n", channels);
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
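+
+/*
+ * Sketch of a playback prepare step (illustrative only): map the DMA
+ * area with q6asm_map_memory_regions() and then describe the PCM
+ * layout. Passing a NULL channel map falls back to the default q6dsp
+ * mapping, as handled above; the example_* parameters are placeholders
+ * a real PCM driver would take from its runtime.
+ */
+#if 0
+static int example_prepare_playback(struct audio_client *ac,
+ phys_addr_t dma_addr, size_t period_bytes, unsigned int periods,
+ uint32_t rate, uint32_t channels)
+{
+ int ret;
+
+ ret = q6asm_map_memory_regions(SNDRV_PCM_STREAM_PLAYBACK, ac,
+ dma_addr, period_bytes, periods);
+ if (ret < 0)
+ return ret;
+
+ return q6asm_media_format_block_multi_ch_pcm(ac, rate, channels,
+ NULL, 16);
+}
+#endif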
+
+/**
+ * q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 *enc_cfg;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*enc_cfg);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ enc_cfg = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg->encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg->encdec.param_size = sizeof(*enc_cfg) - sizeof(enc_cfg->encdec);
+ enc_cfg->encblk.frames_per_buf = frames_per_buf;
+ enc_cfg->encblk.enc_cfg_blk_size = enc_cfg->encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg->num_channels = channels;
+ enc_cfg->bits_per_sample = bits_per_sample;
+ enc_cfg->sample_rate = rate;
+ enc_cfg->is_signed = 1;
+ channel_mapping = enc_cfg->channel_mapping;
+
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support);
+
+/**
+ * q6asm_read() - read data of period size from audio client
+ *
+ * @ac: audio client pointer
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_read(struct audio_client *ac)
+{
+ struct asm_data_cmd_read_v2 *read;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ struct apr_pkt *pkt;
+ unsigned long flags;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*read);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ read = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.opcode = ASM_DATA_CMD_READ_V2;
+ read->buf_addr_lsw = lower_32_bits(ab->phys);
+ read->buf_addr_msw = upper_32_bits(ab->phys);
+ read->mem_map_handle = port->mem_map_handle;
+
+ read->buf_size = ab->size;
+ read->seq_id = port->dsp_buf;
+ pkt->hdr.token = port->dsp_buf;
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ else
+ pr_err("read op[0x%x]rc[%d]\n", pkt->hdr.opcode, rc);
+
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_read);
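+
+/*
+ * Sketch of keeping the capture queue full (illustrative only): a
+ * client callback would typically hand one buffer back to the DSP
+ * with q6asm_read() for every ASM_CLIENT_EVENT_DATA_READ_DONE it
+ * receives. struct example_capture is a placeholder for the caller's
+ * own runtime data.
+ */
+#if 0
+struct example_capture {
+ struct audio_client *audio_client;
+};
+
+static void example_capture_cb(uint32_t opcode, uint32_t token,
+ void *payload, void *priv)
+{
+ struct example_capture *cap = priv;
+
+ if (opcode == ASM_CLIENT_EVENT_DATA_READ_DONE)
+ q6asm_read(cap->audio_client); /* queue the next period */
+}
+#endif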
+
+static int __q6asm_open_read(struct audio_client *ac,
+ uint32_t format, uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_read_v3 *open;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open->src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+ open->preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_NONE;
+ open->bits_per_sample = bits_per_sample;
+ open->mode_flags = 0x0;
+
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+ ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open->mode_flags |= 0x00;
+ open->enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+ kfree(pkt);
+ return rc;
+}
+
+/**
+ * q6asm_open_read() - Open audio client for reading
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ return __q6asm_open_read(ac, format, bits_per_sample);
+}
+EXPORT_SYMBOL_GPL(q6asm_open_read);
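+
+/*
+ * Sketch of the matching capture setup (illustrative only): open the
+ * stream for reading and program the encoder block; the session would
+ * then be started and one q6asm_read() queued per period. The rate
+ * and channel parameters are placeholders.
+ */
+#if 0
+static int example_prepare_capture(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ int ret;
+
+ ret = q6asm_open_read(ac, FORMAT_LINEAR_PCM, 16);
+ if (ret < 0)
+ return ret;
+
+ return q6asm_enc_cfg_blk_pcm_format_support(ac, rate, channels, 16);
+}
+#endif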
+
+/**
+ * q6asm_write_async() - non-blocking write
+ *
+ * @ac: audio client pointer
+ * @len: length in bytes
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ * @wflags: flags associated with write
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t wflags)
+{
+ struct asm_data_cmd_write_v2 *write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ unsigned long flags;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*write);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ write = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.token = port->dsp_buf;
+ pkt->hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+ write->buf_addr_lsw = lower_32_bits(ab->phys);
+ write->buf_addr_msw = upper_32_bits(ab->phys);
+ write->buf_size = len;
+ write->seq_id = port->dsp_buf;
+ write->timestamp_lsw = lsw_ts;
+ write->timestamp_msw = msw_ts;
+ write->mem_map_handle =
+ ac->port[SNDRV_PCM_STREAM_PLAYBACK].mem_map_handle;
+
+ if (wflags == NO_TIMESTAMP)
+ write->flags = (wflags & 0x800000FF);
+ else
+ write->flags = (0x80000000 | wflags);
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_write_async);
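+
+/*
+ * Sketch of the playback data path (illustrative only): once the DSP
+ * acknowledges a buffer with ASM_CLIENT_EVENT_DATA_WRITE_DONE, the
+ * client callback queues the next period with q6asm_write_async().
+ * NO_TIMESTAMP is used because this caller supplies no timestamps;
+ * struct example_playback stands in for the caller's runtime data.
+ */
+#if 0
+struct example_playback {
+ struct audio_client *audio_client;
+ uint32_t period_bytes;
+};
+
+static void example_playback_cb(uint32_t opcode, uint32_t token,
+ void *payload, void *priv)
+{
+ struct example_playback *pb = priv;
+
+ if (opcode == ASM_CLIENT_EVENT_DATA_WRITE_DONE)
+ q6asm_write_async(pb->audio_client, pb->period_bytes,
+ 0, 0, NO_TIMESTAMP);
+}
+#endif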
+
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+ struct audio_port_data *port = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ port->dsp_buf = 0;
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ port->dsp_buf = 0;
+ spin_unlock_irqrestore(&ac->lock, flags);
+}
+
+static int __q6asm_cmd(struct audio_client *ac, int cmd, bool wait)
+{
+ int stream_id = ac->stream_id;
+ struct apr_pkt pkt;
+ int rc;
+
+ q6asm_add_hdr(ac, &pkt.hdr, APR_HDR_SIZE, true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ pkt.hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ pkt.hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ pkt.hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ pkt.hdr.opcode = ASM_STREAM_CMD_CLOSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (wait)
+ rc = q6asm_ac_send_cmd_sync(ac, &pkt);
+ else
+ return apr_send_pkt(ac->adev, &pkt);
+
+ if (rc < 0)
+ return rc;
+
+ if (cmd == CMD_FLUSH)
+ q6asm_reset_buf_state(ac);
+
+ return 0;
+}
+
+/**
+ * q6asm_cmd() - run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+ return __q6asm_cmd(ac, cmd, true);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd);
+
+/**
+ * q6asm_cmd_nowait() - non-blocking, run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+ return __q6asm_cmd(ac, cmd, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
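+
+/*
+ * Sketch of trigger and teardown handling built on the helpers above
+ * (illustrative only): the non-blocking variants suit atomic trigger
+ * context, the blocking ones stream shutdown.
+ */
+#if 0
+static int example_trigger(struct audio_client *ac, bool start)
+{
+ if (start)
+ return q6asm_run_nowait(ac, 0, 0, 0);
+
+ return q6asm_cmd_nowait(ac, CMD_PAUSE);
+}
+
+static void example_shutdown(struct audio_client *ac)
+{
+ q6asm_cmd(ac, CMD_CLOSE);
+ q6asm_unmap_memory_regions(SNDRV_PCM_STREAM_PLAYBACK, ac);
+ q6asm_audio_client_free(ac);
+}
+#endif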
+
+static int q6asm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+ struct q6asm *q6asm;
+
+ q6asm = devm_kzalloc(dev, sizeof(*q6asm), GFP_KERNEL);
+ if (!q6asm)
+ return -ENOMEM;
+
+ q6core_get_svc_api_info(adev->svc_id, &q6asm->ainfo);
+
+ q6asm->dev = dev;
+ q6asm->adev = adev;
+ init_waitqueue_head(&q6asm->mem_wait);
+ spin_lock_init(&q6asm->slock);
+ dev_set_drvdata(dev, q6asm);
+
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ q6asm->pdev_dais = of_platform_device_create(dais_np,
+ "q6asm-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6asm_remove(struct apr_device *adev)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+
+ if (q6asm->pdev_dais)
+ of_platform_device_destroy(&q6asm->pdev_dais->dev, NULL);
+
+ return 0;
+}
+static const struct of_device_id q6asm_device_id[] = {
+ { .compatible = "qcom,q6asm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6asm_device_id);
+
+static struct apr_driver qcom_q6asm_driver = {
+ .probe = q6asm_probe,
+ .remove = q6asm_remove,
+ .callback = q6asm_srvc_callback,
+ .driver = {
+ .name = "qcom-q6asm",
+ .of_match_table = of_match_ptr(q6asm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6asm_driver);
+MODULE_DESCRIPTION("Q6 Audio Stream Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/preimage b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/preimage
new file mode 100644
index 0000000..a7c06a1
--- /dev/null
+++ b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/preimage
@@ -0,0 +1,2820 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/device.h>
+<<<<<<<
+#include <linux/of.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+>>>>>>>
+#include <uapi/sound/asound.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "q6asm.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ASM_STREAM_CMD_CLOSE 0x00010BCD
+#define ASM_STREAM_CMD_FLUSH 0x00010BCE
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_DATA_CMD_EOS 0x00010BDB
+<<<<<<<
+#define ASM_DEFAULT_POPP_TOPOLOGY 0x00010BE4
+=======
+#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
+>>>>>>>
+#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+#define ASM_STREAM_POSTPROC_TOPO_ID_NONE 0x00010C68
+#define ASM_CMD_SHARED_MEM_MAP_REGIONS 0x00010D92
+#define ASM_CMDRSP_SHARED_MEM_MAP_REGIONS 0x00010D93
+#define ASM_CMD_SHARED_MEM_UNMAP_REGIONS 0x00010D94
+#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
+#define ASM_DATA_EVENT_WRITE_DONE_V2 0x00010D99
+#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
+#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
+#define ASM_DATA_CMD_READ_V2 0x00010DAC
+#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
+#define ASM_STREAM_CMD_OPEN_WRITE_V3 0x00010DB3
+#define ASM_STREAM_CMD_OPEN_READ_V3 0x00010DB4
+#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
+#define ASM_STREAM_CMD_OPEN_READWRITE_V2 0x00010D8D
+
+
+#define ASM_LEGACY_STREAM_SESSION 0
+/* Bit shift for the stream_perf_mode subfield. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ 29
+#define ASM_END_POINT_DEVICE_MATRIX 0
+#define ASM_DEFAULT_APP_TYPE 0
+#define ASM_SYNC_IO_MODE 0x0001
+#define ASM_ASYNC_IO_MODE 0x0002
+#define ASM_TUN_READ_IO_MODE 0x0004 /* tunnel read write mode */
+#define ASM_TUN_WRITE_IO_MODE 0x0008 /* tunnel read write mode */
+#define ASM_SHIFT_GAPLESS_MODE_FLAG 31
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
+
+struct avs_cmd_shared_mem_map_regions {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 mem_pool_id;
+ u16 num_regions;
+ u32 property_flag;
+} __packed;
+
+struct avs_shared_map_region_payload {
+ u32 shm_addr_lsw;
+ u32 shm_addr_msw;
+ u32 mem_size_bytes;
+} __packed;
+
+struct avs_cmd_shared_mem_unmap_regions {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 mem_map_handle;
+} __packed;
+
+struct asm_data_cmd_media_fmt_update_v2 {
+ u32 fmt_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 num_channels;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 reserved;
+<<<<<<<
+ u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+=======
+ u8 channel_mapping[PCM_MAX_NUM_CHANNEL];
+>>>>>>>
+} __packed;
+
+struct asm_stream_cmd_set_encdec_param {
+ u32 param_id;
+ u32 param_size;
+} __packed;
+
+struct asm_enc_cfg_blk_param_v2 {
+ u32 frames_per_buf;
+ u32 enc_cfg_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_enc_cfg_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ struct asm_stream_cmd_set_encdec_param encdec;
+ struct asm_enc_cfg_blk_param_v2 encblk;
+ uint16_t num_channels;
+ uint16_t bits_per_sample;
+ uint32_t sample_rate;
+ uint16_t is_signed;
+ uint16_t reserved;
+ uint8_t channel_mapping[8];
+} __packed;
+
+struct asm_data_cmd_read_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+} __packed;
+
+struct asm_data_cmd_read_v2_done {
+ u32 status;
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+};
+
+struct asm_stream_cmd_open_read_v3 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 mode_flags;
+ u32 src_endpointype;
+ u32 preprocopo_id;
+ u32 enc_cfg_id;
+ u16 bits_per_sample;
+ u16 reserved;
+} __packed;
+
+struct asm_data_cmd_write_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+ u32 timestamp_lsw;
+ u32 timestamp_msw;
+ u32 flags;
+} __packed;
+
+struct asm_stream_cmd_open_write_v3 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ uint32_t mode_flags;
+ uint16_t sink_endpointype;
+ uint16_t bits_per_sample;
+ uint32_t postprocopo_id;
+ uint32_t dec_fmt_id;
+} __packed;
+
+struct asm_session_cmd_run_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 flags;
+ u32 time_lsw;
+ u32 time_msw;
+} __packed;
+
+struct audio_buffer {
+ phys_addr_t phys;
+<<<<<<<
+=======
+ uint32_t used;
+>>>>>>>
+ uint32_t size; /* size of buffer */
+};
+
+struct audio_port_data {
+ struct audio_buffer *buf;
+ uint32_t num_periods;
+ uint32_t dsp_buf;
+ uint32_t mem_map_handle;
+};
+
+<<<<<<<
+struct audio_client {
+ int session;
+ q6asm_cb cb;
+ int cmd_state;
+ void *priv;
+ uint32_t io_mode;
+ struct apr_device *adev;
+ struct mutex lock;
+ /* idx:1 out port, 0: in port */
+ struct audio_port_data port[2];
+ wait_queue_head_t cmd_wait;
+ int perf_mode;
+ int stream_id;
+ struct device *dev;
+};
+
+struct q6asm_ops {
+ int (*memory_unmap)(struct audio_client *ac,
+ phys_addr_t buf_add, int dir);
+ int (*memory_map_regions)(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous);
+ int (*open_write)(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+ int (*run)(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait);
+ int (*format_block_multi_ch_pcm)(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample);
+ int (*write_nolock)(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+ int (*cmd)(struct audio_client *ac, int cmd, bool wait);
+};
+
+
+struct q6asm {
+ struct apr_device *adev;
+ int mem_state;
+ struct device *dev;
+ wait_queue_head_t mem_wait;
+ struct mutex session_lock;
+ struct platform_device *pcmdev;
+ struct audio_client *session[MAX_SESSIONS + 1];
+ void *dai_data;
+ struct q6asm_ops *ops;
+};
+
+/****** V1 ***/
+/* bit 0:1 represents priority of stream */
+#define STREAM_PRIORITY_NORMAL 0x0000
+#define STREAM_PRIORITY_LOW 0x0001
+#define STREAM_PRIORITY_HIGH 0x0002
+
+/* Supported formats */
+#define LINEAR_PCM 0x00010BE5
+
+#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA
+#define ASM_STREAM_CMD_OPEN_WRITE_V2_1 0x00010DB1
+struct asm_stream_cmd_open_write_v1 {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u16 sink_endpoint;
+ u16 stream_handle;
+ u32 post_proc_top;
+ u32 format;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC
+#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY 0x00010BDE
+
+struct asm_pcm_cfg {
+ u16 ch_cfg;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 interleaved;
+};
+
+struct asm_stream_media_format_update_v1{
+ struct apr_hdr hdr;
+ u32 format;
+ u32 cfg_size;
+ union {
+ struct asm_pcm_cfg pcm_cfg;
+ } __attribute__((packed)) write_cfg;
+} __attribute__((packed));
+
+
+#define ASM_DATA_CMD_WRITE 0x00010BD9
+struct asm_stream_cmd_write_v1{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 avail_bytes;
+ u32 uid;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 uflags;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_WRITE_DONE 0x00010BDF
+struct asm_data_event_write_done{
+ u32 buf_add;
+ u32 status;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_RUN 0x00010BD2
+struct asm_stream_cmd_run_v1{
+ struct apr_hdr hdr;
+ u32 flags;
+ u32 msw_ts;
+ u32 lsw_ts;
+} __attribute__((packed));
+
+/* Session Level commands */
+#define ASM_SESSION_CMD_MEMORY_MAP 0x00010C32
+struct asm_stream_cmd_memory_map{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u16 mempool_id;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP 0x00010C33
+struct asm_stream_cmd_memory_unmap{
+ struct apr_hdr hdr;
+ u32 buf_add;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS 0x00010C45
+struct asm_memory_map_regions{
+ u32 phys;
+ u32 buf_size;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_map_regions{
+ struct apr_hdr hdr;
+ u16 mempool_id;
+ u16 nregions;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS 0x00010C46
+struct asm_memory_unmap_regions{
+ u32 phys;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_unmap_regions{
+ struct apr_hdr hdr;
+ u16 nregions;
+ u16 reserved;
+} __attribute__((packed));
+
+/**********/
+static bool q6asm_is_valid_audio_client(struct audio_client *ac)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int n;
+
+ if (!ac)
+ return false;
+
+ for (n = 1; n <= MAX_SESSIONS; n++) {
+ if (a->session[n] == ac)
+ return true;
+ }
+
+ return false;
+}
+
+=======
+struct q6asm {
+ struct apr_device *adev;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ wait_queue_head_t mem_wait;
+ struct platform_device *pcmdev;
+ spinlock_t slock;
+ struct audio_client *session[MAX_SESSIONS + 1];
+ struct platform_device *pdev_dais;
+};
+
+struct audio_client {
+ int session;
+ q6asm_cb cb;
+ void *priv;
+ uint32_t io_mode;
+ struct apr_device *adev;
+ struct mutex cmd_lock;
+ spinlock_t lock;
+ struct kref refcount;
+ /* idx:1 out port, 0: in port */
+ struct audio_port_data port[2];
+ wait_queue_head_t cmd_wait;
+ struct aprv2_ibasic_rsp_result_t result;
+ int perf_mode;
+ int stream_id;
+ struct q6asm *q6asm;
+ struct device *dev;
+};
+
+>>>>>>>
+static inline void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, bool cmd_flg,
+ uint32_t stream_id)
+{
+ hdr->hdr_field = APR_SEQ_CMD_HDR_FIELD;
+<<<<<<<
+=======
+ hdr->src_svc = ac->adev->svc_id;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->dest_svc = APR_SVC_ASM;
+ hdr->dest_domain = APR_DOMAIN_ADSP;
+>>>>>>>
+ hdr->src_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->pkt_size = pkt_size;
+ if (cmd_flg)
+ hdr->token = ac->session;
+}
+
+static int q6asm_apr_send_session_pkt(struct q6asm *a, struct audio_client *ac,
+<<<<<<<
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+ rc = apr_send_pkt(a->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ if (rsp_opcode)
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode) ||
+ (ac->result.opcode == rsp_opcode),
+ 5 * HZ);
+ else
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode),
+ 5 * HZ);
+
+ if (!rc) {
+ dev_err(a->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ } else if (ac->result.status > 0) {
+ dev_err(a->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+static int __q6asm_memory_unmap(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct avs_cmd_shared_mem_unmap_regions *mem_unmap;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct apr_pkt *pkt;
+ int rc, pkt_size;
+ void *p;
+=======
+ void *data)
+{
+ int rc;
+
+ mutex_lock(&a->session_lock);
+ a->mem_state = 1;
+ rc = apr_send_pkt(a->adev, data);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(a->mem_wait, (a->mem_state <= 0), 5 * HZ);
+ if (!rc) {
+ dev_err(a->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ } else if (a->mem_state < 0) {
+ rc = q6dsp_errno(a->mem_state);
+ }
+
+err:
+ mutex_unlock(&a->session_lock);
+ return rc;
+}
+
+static int __q6asm_memory_unmap_v2(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int rc;
+>>>>>>>
+
+ if (ac->port[dir].mem_map_handle == 0) {
+ dev_err(ac->dev, "invalid mem handle\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ mem_unmap.hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ mem_unmap.hdr.src_port = 0;
+ mem_unmap.hdr.dest_port = 0;
+ mem_unmap.hdr.pkt_size = sizeof(mem_unmap);
+ mem_unmap.hdr.token = ((ac->session << 8) | dir);
+
+ mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mem_unmap.mem_map_handle = ac->port[dir].mem_map_handle;
+
+ rc = q6asm_apr_send_session_pkt(a, ac, &mem_unmap);
+ if (rc < 0)
+ return rc;
+
+ ac->port[dir].mem_map_handle = 0;
+
+ return 0;
+}
+
+static int __q6asm_memory_unmap_v1(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct asm_stream_cmd_memory_unmap_regions *mem_unmap;
+ struct asm_memory_unmap_regions *mregions = NULL;
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *b;
+
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int rc, cmd_size, bufcnt, i;
+
+ port = &ac->port[dir];
+ bufcnt = port->num_periods;
+
+ if (ac->port[dir].mem_map_handle == 0) {
+ dev_err(ac->dev, "invalid mem handle\n");
+ return -EINVAL;
+ }
+
+ cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) +
+ sizeof(struct asm_memory_unmap_regions) * bufcnt;
+
+ b = kzalloc(cmd_size, GFP_KERNEL);
+ mem_unmap = b;
+ if (!mem_unmap)
+ return -ENOMEM;
+
+
+ mem_unmap->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ mem_unmap->hdr.src_port = 0;
+ mem_unmap->hdr.dest_port = 0;
+ mem_unmap->hdr.pkt_size = cmd_size;
+ mem_unmap->hdr.token = ((ac->session << 8) | dir);
+
+ mem_unmap->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS;
+ mem_unmap->nregions = bufcnt & 0x00ff;
+
+ mregions = b + sizeof(*mem_unmap);
+
+ for (i = 0; i < bufcnt; i++) {
+ ab = &port->buf[i];
+ mregions->phys = lower_32_bits(ab->phys);
+ ++mregions;
+ }
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mem_unmap);
+ if (rc < 0)
+ return rc;
+
+ ac->port[dir].mem_map_handle = 0;
+
+ kfree(b);
+ return 0;
+=======
+ pkt_size = APR_HDR_SIZE + sizeof(*mem_unmap);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ mem_unmap = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mem_unmap->mem_map_handle = ac->port[dir].mem_map_handle;
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt, 0);
+ if (rc < 0) {
+ kfree(pkt);
+ return rc;
+ }
+
+ ac->port[dir].mem_map_handle = 0;
+
+ kfree(pkt);
+ return 0;
+}
+
+
+static void q6asm_audio_client_free_buf(struct audio_client *ac,
+ struct audio_port_data *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port->num_periods = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+ spin_unlock_irqrestore(&ac->lock, flags);
+>>>>>>>
+}
+
+/**
+ * q6asm_unmap_memory_regions() - unmap memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac)
+{
+<<<<<<<
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+>>>>>>>
+ struct audio_port_data *port;
+ int cnt = 0;
+ int rc = 0;
+
+<<<<<<<
+=======
+ mutex_lock(&ac->lock);
+>>>>>>>
+ port = &ac->port[dir];
+ if (!port->buf) {
+ rc = -EINVAL;
+ goto err;
+ }
+<<<<<<<
+ cnt = port->num_periods - 1;
+ if (cnt >= 0) {
+ a->ops->memory_unmap(ac, port->buf[dir].phys, dir);
+=======
+
+ cnt = port->num_periods - 1;
+ if (cnt >= 0) {
+ rc = __q6asm_memory_unmap(ac, port->buf[dir].phys, dir);
+>>>>>>>
+ if (rc < 0) {
+ dev_err(ac->dev, "%s: Memory_unmap_regions failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+ }
+
+<<<<<<<
+ port->num_periods = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+
+err:
+ mutex_unlock(&ac->lock);
+=======
+ q6asm_audio_client_free_buf(ac, port);
+
+err:
+>>>>>>>
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_unmap_memory_regions);
+
+<<<<<<<
+static int __q6asm_memory_map_regions(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct avs_cmd_shared_mem_map_regions *cmd = NULL;
+ struct avs_shared_map_region_payload *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ struct apr_pkt *pkt;
+ void *p;
+ unsigned long flags;
+ uint32_t num_regions, buf_sz;
+ int rc, i, pkt_size;
+
+ if (is_contiguous) {
+ num_regions = 1;
+ buf_sz = period_sz * periods;
+ } else {
+ buf_sz = period_sz;
+ num_regions = periods;
+ }
+
+ /* DSP expects size should be aligned to 4K */
+ buf_sz = ALIGN(buf_sz, 4096);
+
+ pkt_size = APR_HDR_SIZE + sizeof(*cmd) +
+ (sizeof(*mregions) * num_regions);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ cmd = p + APR_HDR_SIZE;
+ mregions = p + APR_HDR_SIZE + sizeof(*cmd);
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+
+=======
+static int __q6asm_memory_map_regions_v1(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct asm_stream_cmd_memory_map_regions *cmd = NULL;
+ struct asm_memory_map_regions *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *mmap_region_cmd = NULL;
+ uint32_t num_regions, buf_sz;
+ int rc, i, cmd_size;
+
+ num_regions = is_contiguous ? 1 : periods;
+ buf_sz = is_contiguous ? (period_sz * periods) : period_sz;
+ buf_sz = PAGE_ALIGN(buf_sz);
+
+ cmd_size = sizeof(*cmd) + (sizeof(*mregions) * num_regions);
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd)
+ return -ENOMEM;
+
+ cmd = mmap_region_cmd;
+
+ cmd->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ cmd->hdr.src_port = 0;
+ cmd->hdr.dest_port = 0;
+ cmd->hdr.pkt_size = cmd_size;
+ cmd->hdr.token = ((ac->session << 8) | dir);
+
+
+ cmd->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS;
+ cmd->mempool_id = 0;
+ cmd->nregions = num_regions;
+
+ mregions = mmap_region_cmd + sizeof(*cmd);
+
+ port = &ac->port[dir];
+
+ for (i = 0; i < num_regions; i++) {
+ ab = &port->buf[i];
+ mregions->phys = lower_32_bits(ab->phys);
+ mregions->buf_size = buf_sz;
+ ++mregions;
+ }
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mmap_region_cmd);
+
+ kfree(mmap_region_cmd);
+
+ return rc;
+}
+
+static int __q6asm_memory_map_regions_v2(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct avs_cmd_shared_mem_map_regions *cmd = NULL;
+ struct avs_shared_map_region_payload *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *mmap_region_cmd = NULL;
+ uint32_t num_regions, buf_sz;
+ int rc, i, cmd_size;
+
+ num_regions = is_contiguous ? 1 : periods;
+ buf_sz = is_contiguous ? (period_sz * periods) : period_sz;
+ buf_sz = PAGE_ALIGN(buf_sz);
+
+ cmd_size = sizeof(*cmd) + (sizeof(*mregions) * num_regions);
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd)
+ return -ENOMEM;
+
+ cmd = mmap_region_cmd;
+
+ cmd->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ cmd->hdr.src_port = 0;
+ cmd->hdr.dest_port = 0;
+ cmd->hdr.pkt_size = cmd_size;
+ cmd->hdr.token = ((ac->session << 8) | dir);
+
+
+ cmd->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+>>>>>>>
+ cmd->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ cmd->num_regions = num_regions;
+ cmd->property_flag = 0x00;
+
+<<<<<<<
+ mregions = mmap_region_cmd + sizeof(*cmd);
+
+=======
+ spin_lock_irqsave(&ac->lock, flags);
+>>>>>>>
+ port = &ac->port[dir];
+
+ for (i = 0; i < num_regions; i++) {
+ ab = &port->buf[i];
+ mregions->shm_addr_lsw = lower_32_bits(ab->phys);
+ mregions->shm_addr_msw = upper_32_bits(ab->phys);
+ mregions->mem_size_bytes = buf_sz;
+ ++mregions;
+ }
+<<<<<<<
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt,
+ ASM_CMDRSP_SHARED_MEM_MAP_REGIONS);
+
+ kfree(pkt);
+=======
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mmap_region_cmd);
+
+ kfree(mmap_region_cmd);
+>>>>>>>
+
+ return rc;
+}
+
+/**
+ * q6asm_map_memory_regions() - map memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ * @phys: physical address that needs mapping.
+ * @period_sz: audio period size
+ * @periods: number of periods
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_map_memory_regions(unsigned int dir, struct audio_client *ac,
+ phys_addr_t phys,
+ size_t period_sz, unsigned int periods)
+{
+<<<<<<<
+ struct audio_buffer *buf;
+ unsigned long flags;
+ int cnt;
+ int rc;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ if (ac->port[dir].buf) {
+ dev_err(ac->dev, "Buffer already allocated\n");
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return 0;
+ }
+
+ buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_ATOMIC);
+ if (!buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return -ENOMEM;
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_buffer *buf;
+ int cnt;
+ int rc;
+
+ mutex_lock(&ac->lock);
+ if (ac->port[dir].buf) {
+ dev_err(ac->dev, "Buffer already allocated\n");
+ rc = 0;
+ goto err;
+ }
+
+
+ buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto err;
+>>>>>>>
+ }
+
+
+ ac->port[dir].buf = buf;
+
+ buf[0].phys = phys;
+<<<<<<<
+=======
+ buf[0].used = !!dir;
+>>>>>>>
+ buf[0].size = period_sz;
+
+ for (cnt = 1; cnt < periods; cnt++) {
+ if (period_sz > 0) {
+ buf[cnt].phys = buf[0].phys + (cnt * period_sz);
+<<<<<<<
+ buf[cnt].size = period_sz;
+ }
+ }
+ ac->port[dir].num_periods = periods;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = __q6asm_memory_map_regions(ac, dir, period_sz, periods, 1);
+ if (rc < 0) {
+ dev_err(ac->dev, "Memory_map_regions failed\n");
+ q6asm_audio_client_free_buf(ac, &ac->port[dir]);
+ }
+
+=======
+ buf[cnt].used = dir ^ 1;
+ buf[cnt].size = period_sz;
+ }
+ }
+
+ ac->port[dir].num_periods = periods;
+ rc = a->ops->memory_map_regions(ac, dir, period_sz, periods, 1);
+ if (rc < 0) {
+ dev_err(ac->dev, "Memory_map_regions failed\n");
+ ac->port[dir].num_periods = 0;
+ kfree(buf);
+ ac->port[dir].buf = NULL;
+ goto err;
+ }
+
+err:
+ mutex_unlock(&ac->lock);
+>>>>>>>
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_map_memory_regions);
+
+<<<<<<<
+=======
+static void q6asm_audio_client_release(struct kref *ref)
+{
+ struct audio_client *ac;
+ struct q6asm *a;
+ unsigned long flags;
+
+ ac = container_of(ref, struct audio_client, refcount);
+ a = ac->q6asm;
+
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[ac->session] = NULL;
+ spin_unlock_irqrestore(&a->slock, flags);
+
+ kfree(ac);
+}
+
+>>>>>>>
+/**
+ * q6asm_audio_client_free() - Free allocated audio client
+ *
+ * @ac: audio client to free
+ */
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+<<<<<<<
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ mutex_lock(&a->session_lock);
+ a->session[ac->session] = NULL;
+ mutex_unlock(&a->session_lock);
+ kfree(ac);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_free);
+
+static struct audio_client *q6asm_get_audio_client(struct q6asm *a,
+ int session_id)
+{
+<<<<<<<
+ if ((session_id <= 0) || (session_id > MAX_SESSIONS)) {
+ dev_err(a->dev, "invalid session: %d\n", session_id);
+ return NULL;
+ }
+
+ if (!a->session[session_id]) {
+ dev_err(a->dev, "session not active: %d\n", session_id);
+ return NULL;
+ }
+
+ return a->session[session_id];
+}
+
+/**
+ * q6asm_set_dai_data() - set dai private data
+ *
+ * @dev: Pointer to asm device.
+ * @data: dai private data
+ *
+ */
+void q6asm_set_dai_data(struct device *dev, void *data)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+
+ a->dai_data = data;
+}
+EXPORT_SYMBOL_GPL(q6asm_set_dai_data);
+
+/**
+ * q6asm_get_dai_data() - get dai private data
+ *
+ * @dev: Pointer to asm device.
+ *
+ * Return: pointer to dai private data
+ */
+void *q6asm_get_dai_data(struct device *dev)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+
+ return a->dai_data;
+}
+EXPORT_SYMBOL_GPL(q6asm_get_dai_data);
+
+static int32_t q6asm_stream_callback(struct apr_device *adev,
+ struct apr_client_message *data,
+=======
+ struct audio_client *ac = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->slock, flags);
+ if ((session_id <= 0) || (session_id > MAX_SESSIONS)) {
+ dev_err(a->dev, "invalid session: %d\n", session_id);
+ goto err;
+ }
+
+ /* check for valid session */
+ if (!a->session[session_id])
+ goto err;
+ else if (a->session[session_id]->session != session_id)
+ goto err;
+
+ ac = a->session[session_id];
+ kref_get(&ac->refcount);
+err:
+ spin_unlock_irqrestore(&a->slock, flags);
+ return ac;
+}
+
+static int32_t q6asm_stream_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data,
+>>>>>>>
+ int session_id)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+<<<<<<<
+ struct apr_hdr *hdr = &data->hdr;
+ struct audio_port_data *port;
+ struct audio_client *ac;
+ uint32_t client_event = 0;
+ int ret = 0;
+=======
+ struct audio_port_data *port;
+ struct audio_client *ac;
+ uint32_t token;
+ uint32_t client_event = 0;
+>>>>>>>
+
+ ac = q6asm_get_audio_client(q6asm, session_id);
+ if (!ac)/* Audio client might already be freed by now */
+ return 0;
+
+<<<<<<<
+ if (!q6asm_is_valid_audio_client(ac))
+ return -EINVAL;
+
+ result = data->payload;
+
+ switch (data->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ token = data->token;
+=======
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+>>>>>>>
+ switch (result->opcode) {
+ case ASM_SESSION_CMD_PAUSE:
+ client_event = ASM_CLIENT_EVENT_CMD_PAUSE_DONE;
+ break;
+ case ASM_SESSION_CMD_SUSPEND:
+ client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
+ break;
+ case ASM_DATA_CMD_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+ break;
+<<<<<<<
+ break;
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+ case ASM_SESSION_CMD_RUN:
+ case ASM_SESSION_CMD_RUN_V2:
+ client_event = ASM_CLIENT_EVENT_CMD_RUN_DONE;
+ break;
+
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ if (token != ac->session) {
+ dev_err(ac->dev, "session invalid\n");
+ return -EINVAL;
+ }
+ case ASM_STREAM_CMD_CLOSE:
+ client_event = ASM_CLIENT_EVENT_CMD_CLOSE_DONE;
+ break;
+ case ASM_STREAM_CMD_OPEN_WRITE:
+ case ASM_STREAM_CMD_OPEN_WRITE_V2_1:
+=======
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+ case ASM_SESSION_CMD_RUN_V2:
+ client_event = ASM_CLIENT_EVENT_CMD_RUN_DONE;
+ break;
+ case ASM_STREAM_CMD_CLOSE:
+ client_event = ASM_CLIENT_EVENT_CMD_CLOSE_DONE;
+ break;
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ client_event = ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE;
+ break;
+>>>>>>>
+ case ASM_STREAM_CMD_OPEN_WRITE_V3:
+ case ASM_STREAM_CMD_OPEN_READ_V3:
+ case ASM_STREAM_CMD_OPEN_READWRITE_V2:
+ case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+<<<<<<<
+=======
+ case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+>>>>>>>
+ case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+ if (result->status != 0) {
+ dev_err(ac->dev,
+ "cmd = 0x%x returned error = 0x%x\n",
+ result->opcode, result->status);
+<<<<<<<
+ ac->cmd_state = -result->status;
+ wake_up(&ac->cmd_wait);
+ return 0;
+=======
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+ ret = 0;
+ goto done;
+>>>>>>>
+ }
+ break;
+ default:
+ dev_err(ac->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+
+<<<<<<<
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+
+ if (ac->cb)
+ ac->cb(client_event, hdr->token,
+ data->payload, ac->priv);
+
+ ret = 0;
+ goto done;
+
+ case ASM_DATA_EVENT_WRITE_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+=======
+ if (ac->cmd_state) {
+ ac->cmd_state = 0;
+ wake_up(&ac->cmd_wait);
+ }
+ if (ac->cb)
+ ac->cb(client_event, data->token,
+ data->payload, ac->priv);
+
+ return 0;
+
+ case ASM_DATA_EVENT_WRITE_DONE:
+ case ASM_DATA_EVENT_WRITE_DONE_V2:
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+
+ client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE;
+
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys = port->buf[data->token].phys;
+>>>>>>>
+
+ if (lower_32_bits(phys) != result->opcode ||
+ upper_32_bits(phys) != result->status) {
+ dev_err(ac->dev, "Expected addr %pa\n",
+<<<<<<<
+ &port->buf[data->token].phys);
+ return -EINVAL;
+ }
+ token = data->token;
+ port->buf[token].used = 1;
+ }
+ break;
+ case ASM_DATA_EVENT_READ_DONE_V2: {
+ struct asm_data_cmd_read_v2_done *done = data->payload;
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ client_event = ASM_CLIENT_EVENT_DATA_READ_DONE;
+
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys = port->buf[data->token].phys;
+ token = data->token;
+ port->buf[token].used = 0;
+=======
+ &port->buf[hdr->token].phys);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+ break;
+ case ASM_DATA_EVENT_READ_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_READ_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ struct asm_data_cmd_read_v2_done *done = data->payload;
+ unsigned long flags;
+ phys_addr_t phys;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+>>>>>>>
+
+ if (upper_32_bits(phys) != done->buf_addr_msw ||
+ lower_32_bits(phys) != done->buf_addr_lsw) {
+ dev_err(ac->dev, "Expected addr %pa %08x-%08x\n",
+<<<<<<<
+ &port->buf[data->token].phys, done->buf_addr_lsw, done->buf_addr_msw);
+ break;
+ }
+ }
+ }
+=======
+ &port->buf[hdr->token].phys,
+ done->buf_addr_lsw,
+ done->buf_addr_msw);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+>>>>>>>
+
+ break;
+ }
+
+ if (ac->cb)
+<<<<<<<
+ ac->cb(client_event, data->token, data->payload, ac->priv);
+
+ return 0;
+}
+
+static int q6asm_srvc_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct q6asm *a, *q6asm = dev_get_drvdata(&adev->dev);
+ struct audio_client *ac = NULL;
+ struct audio_port_data *port;
+ uint32_t dir = 0;
+ uint32_t sid = 0;
+ int session_id;
+
+ session_id = (data->dest_port >> 8) & 0xFF;
+ if (session_id)
+ return q6asm_stream_callback(adev, data, session_id);
+
+ result = data->payload;
+ sid = (data->token >> 8) & 0x0F;
+=======
+ ac->cb(client_event, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+ return ret;
+}
+
+static int q6asm_srvc_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct audio_port_data *port;
+ struct audio_client *ac = NULL;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6asm *a;
+ uint32_t sid = 0;
+ uint32_t dir = 0;
+ int session_id;
+
+ session_id = (hdr->dest_port >> 8) & 0xFF;
+ if (session_id)
+ return q6asm_stream_callback(adev, data, session_id);
+
+ sid = (hdr->token >> 8) & 0x0F;
+>>>>>>>
+ ac = q6asm_get_audio_client(q6asm, sid);
+ if (!ac) {
+ dev_err(&adev->dev, "Audio Client not active\n");
+ return 0;
+ }
+
+<<<<<<<
+ a = dev_get_drvdata(ac->dev);
+ dir = (data->token & 0x0F);
+ port = &ac->port[dir];
+
+ switch (data->opcode)
+ case APR_BASIC_RSP_RESULT: {
+ switch (result->opcode) {
+ case ASM_SESSION_CMD_MEMORY_MAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+
+ case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = 0;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ if (result->status != 0) {
+ dev_err(ac->dev,
+ "cmd = 0x%x retur err= 0x%x sid:%d\n",
+ result->opcode, result->status, sid);
+ a->mem_state = -result->status;
+ } else {
+ a->mem_state = 0;
+ }
+
+=======
+ a = dev_get_drvdata(ac->dev->parent);
+ dir = (hdr->token & 0x0F);
+ port = &ac->port[dir];
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ switch (result->opcode) {
+ case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result = *result;
+>>>>>>>
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_err(&adev->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+<<<<<<<
+ goto done;
+ case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+ ac->result.status = 0;
+ ac->result.opcode = hdr->opcode;
+ port->mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result.opcode = hdr->opcode;
+ ac->result.status = 0;
+ port->mem_map_handle = 0;
+=======
+ return 0;
+ case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = 0;
+>>>>>>>
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_dbg(&adev->dev, "command[0x%x]success [0x%x]\n",
+ result->opcode, result->status);
+ break;
+ }
+
+ if (ac->cb)
+<<<<<<<
+ ac->cb(data->opcode, data->token, data->payload, ac->priv);
+=======
+ ac->cb(hdr->opcode, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+>>>>>>>
+
+ return 0;
+}
+
+/**
+ * q6asm_get_session_id() - get session id for audio client
+ *
+<<<<<<<
+ * @ac: audio client pointer
+=======
+ * @c: audio client pointer
+>>>>>>>
+ *
+ * Return: Will be the session id of the audio client.
+ */
+int q6asm_get_session_id(struct audio_client *c)
+{
+ return c->session;
+}
+EXPORT_SYMBOL_GPL(q6asm_get_session_id);
+
+/**
+ * q6asm_audio_client_alloc() - Allocate a new audio client
+ *
+ * @dev: Pointer to asm child device.
+ * @cb: event callback.
+ * @priv: private data associated with this client.
+<<<<<<<
+=======
+ * @stream_id: stream id
+ * @perf_mode: performance mode for this client
+>>>>>>>
+ *
+ * Return: Will be an error pointer on error or a valid audio client
+ * on success.
+ */
+struct audio_client *q6asm_audio_client_alloc(struct device *dev, q6asm_cb cb,
+<<<<<<<
+ void *priv, int stream_id)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+ struct audio_client *ac;
+
+ if (stream_id + 1 > MAX_SESSIONS)
+ return ERR_PTR(-EINVAL);
+=======
+ void *priv, int stream_id,
+ int perf_mode)
+{
+ struct q6asm *a = dev_get_drvdata(dev->parent);
+ struct audio_client *ac;
+ unsigned long flags;
+
+ ac = q6asm_get_audio_client(a, stream_id + 1);
+ if (ac) {
+ dev_err(dev, "Audio Client already active\n");
+ return ac;
+ }
+>>>>>>>
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return ERR_PTR(-ENOMEM);
+
+<<<<<<<
+ mutex_lock(&a->session_lock);
+ a->session[stream_id + 1] = ac;
+ mutex_unlock(&a->session_lock);
+
+ ac->session = stream_id + 1;
+ ac->cb = cb;
+ ac->dev = dev;
+ ac->priv = priv;
+ ac->io_mode = ASM_SYNC_IO_MODE;
+ ac->perf_mode = LEGACY_PCM_MODE;
+ /* DSP expects stream id from 1 */
+ ac->stream_id = 1;
+ ac->adev = a->adev;
+
+ init_waitqueue_head(&ac->cmd_wait);
+ mutex_init(&ac->lock);
+ ac->cmd_state = 0;
+=======
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[stream_id + 1] = ac;
+ spin_unlock_irqrestore(&a->slock, flags);
+ ac->session = stream_id + 1;
+ ac->cb = cb;
+ ac->dev = dev;
+ ac->q6asm = a;
+ ac->priv = priv;
+ ac->io_mode = ASM_SYNC_IO_MODE;
+ ac->perf_mode = perf_mode;
+ /* DSP expects stream id from 1 */
+ ac->stream_id = 1;
+ ac->adev = a->adev;
+ kref_init(&ac->refcount);
+
+ init_waitqueue_head(&ac->cmd_wait);
+ mutex_init(&ac->cmd_lock);
+ spin_lock_init(&ac->lock);
+>>>>>>>
+
+ return ac;
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_alloc);
+
+<<<<<<<
+static int q6asm_ac_send_cmd_sync(struct audio_client *ac, struct apr_pkt *pkt)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (ac->result.opcode == hdr->opcode), 5 * HZ);
+=======
+static int q6asm_ac_send_cmd_sync(struct audio_client *ac, void *cmd)
+{
+ int rc;
+
+ mutex_lock(&ac->lock);
+ ac->cmd_state = 1;
+
+ rc = apr_send_pkt(ac->adev, cmd);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(ac->cmd_wait, (ac->cmd_state <= 0), 5 * HZ);
+>>>>>>>
+ if (!rc) {
+ dev_err(ac->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ goto err;
+ }
+
+<<<<<<<
+ if (ac->cmd_state > 0)
+ rc = q6dsp_errno(ac->cmd_state);
+
+err:
+ mutex_unlock(&ac->lock);
+ return rc;
+}
+
+int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v3 open;
+ int rc;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+ open.mode_flags = 0x00;
+ open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+
+ /* source endpoint : matrix */
+ open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+ open.bits_per_sample = bits_per_sample;
+ open.postprocopo_id = ASM_DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ return -EINVAL;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &open);
+ if (rc < 0)
+ return rc;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+ return 0;
+}
+
+int q6asm_open_write_v1(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v1 open;
+ int rc;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE;
+ open.uMode = STREAM_PRIORITY_HIGH;
+
+ /* source endpoint : matrix */
+ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+ open.stream_handle = 0x00;
+ open.post_proc_top = ASM_DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.format = LINEAR_PCM;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ return -EINVAL;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &open);
+ if (rc < 0)
+ return rc;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+ return 0;
+}
+/**
+ * q6asm_open_write() - Open audio client for writing
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->open_write(ac, format, bits_per_sample);
+
+}
+EXPORT_SYMBOL_GPL(q6asm_open_write);
+
+static int __q6asm_run_v2(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_session_cmd_run_v2 run;
+
+ q6asm_add_hdr(ac, &run.hdr, sizeof(run), true, ac->stream_id);
+
+ run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+ run.flags = flags;
+ run.time_lsw = lsw_ts;
+ run.time_msw = msw_ts;
+ if (wait)
+ return q6asm_ac_send_cmd_sync(ac, &run);
+ else
+ return apr_send_pkt(ac->adev, &run);
+
+}
+
+static int __q6asm_run_v1(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_stream_cmd_run_v1 run;
+
+ q6asm_add_hdr(ac, &run.hdr, sizeof(run), true, ac->stream_id);
+
+ run.hdr.opcode = ASM_SESSION_CMD_RUN;
+ run.flags = flags;
+ run.lsw_ts = lsw_ts;
+ run.msw_ts = msw_ts;
+ if (wait)
+ return q6asm_ac_send_cmd_sync(ac, &run);
+ else
+ return apr_send_pkt(ac->adev, &run);
+
+=======
+ if (ac->result.status > 0) {
+ dev_err(ac->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ }
+
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+/**
+ * q6asm_open_write() - Open audio client for writing
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v3 *open;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+ open->mode_flags = 0x00;
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION;
+
+ /* source endpoint : matrix */
+ open->sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+ open->bits_per_sample = bits_per_sample;
+ open->postprocopo_id = ASM_NULL_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ if (rc < 0)
+ goto err;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_open_write);
+
+static int __q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_session_cmd_run_v2 *run;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*run);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ run = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+ run->flags = flags;
+ run->time_lsw = lsw_ts;
+ run->time_msw = msw_ts;
+ if (wait) {
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ } else {
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ }
+
+ kfree(pkt);
+ return rc;
+>>>>>>>
+}
+
+/**
+ * q6asm_run() - start the audio client
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+<<<<<<<
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, true);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->run(ac, flags, msw_ts, lsw_ts, true);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_run);
+
+/**
+ * q6asm_run_nowait() - start the audio client without blocking
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+<<<<<<<
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_run_nowait);
+
+/**
+ * q6asm_media_format_block_multi_ch_pcm() - setup pcm configuration
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @channel_map: channel map pointer
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->num_channels = channels;
+ fmt->bits_per_sample = bits_per_sample;
+ fmt->sample_rate = rate;
+ fmt->is_signed = 1;
+
+ channel_mapping = fmt->channel_mapping;
+
+ if (channel_map) {
+ memcpy(channel_mapping, channel_map, PCM_MAX_NUM_CHANNEL);
+ } else {
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ dev_err(ac->dev, " map channels failed %d\n", channels);
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+/**
+ * q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->run(ac, flags, msw_ts, lsw_ts, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_run_nowait);
+
+int q6asm_media_format_block_multi_ch_pcm_v2(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
+ u8 *channel_mapping;
+ int rc;
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), true, ac->stream_id);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmt_blk);
+ fmt.num_channels = channels;
+ fmt.bits_per_sample = bits_per_sample;
+ fmt.sample_rate = rate;
+ fmt.is_signed = 1;
+
+ channel_mapping = fmt.channel_mapping;
+
+ if (channel_map) {
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ } else {
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ dev_err(ac->dev, " map channels failed %d\n", channels);
+ return -EINVAL;
+ }
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &fmt);
+ if (rc < 0)
+ goto fail_cmd;
+
+ return 0;
+fail_cmd:
+ return rc;
+}
+
+int q6asm_media_format_block_multi_ch_pcm_v1(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_media_format_update_v1 fmt;
+ int rc;
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), true, ac->stream_id);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = LINEAR_PCM;
+ fmt.cfg_size = sizeof(struct asm_pcm_cfg);
+ fmt.write_cfg.pcm_cfg.ch_cfg = channels;
+ fmt.write_cfg.pcm_cfg.bits_per_sample = bits_per_sample; //FIXME 16
+ fmt.write_cfg.pcm_cfg.sample_rate = rate;
+ fmt.write_cfg.pcm_cfg.is_signed = 1;
+ fmt.write_cfg.pcm_cfg.interleaved = 1;
+
+ rc = q6asm_ac_send_cmd_sync(ac, &fmt);
+ if (rc < 0)
+ goto fail_cmd;
+
+ return 0;
+fail_cmd:
+ return rc;
+}
+/**
+ * q6asm_media_format_block_multi_ch_pcm() - setup pcm configuration
+>>>>>>>
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+<<<<<<<
+=======
+ * @use_default_chmap: flag to use default ch map.
+ * @channel_map: channel map pointer
+>>>>>>>
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+<<<<<<<
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 *enc_cfg;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*enc_cfg);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ enc_cfg = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg->encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg->encdec.param_size = sizeof(*enc_cfg) - sizeof(enc_cfg->encdec);
+ enc_cfg->encblk.frames_per_buf = frames_per_buf;
+ enc_cfg->encblk.enc_cfg_blk_size = enc_cfg->encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg->num_channels = channels;
+ enc_cfg->bits_per_sample = bits_per_sample;
+ enc_cfg->sample_rate = rate;
+ enc_cfg->is_signed = 1;
+ channel_mapping = enc_cfg->channel_mapping;
+
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+err:
+ kfree(pkt);
+ return rc;
+=======
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->format_block_multi_ch_pcm(ac, rate, channels,
+ channel_map,
+ bits_per_sample);
+}
+EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+static int __q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), true, ac->stream_id);
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+ sizeof(enc_cfg.encdec);
+ enc_cfg.encblk.frames_per_buf = frames_per_buf;
+ enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg.num_channels = channels;
+ enc_cfg.bits_per_sample = bits_per_sample;
+ enc_cfg.sample_rate = rate;
+ enc_cfg.is_signed = 1;
+ channel_mapping = enc_cfg.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (q6dsp_map_channels(channel_mapping, channels))
+ return -EINVAL;
+
+
+ return q6asm_ac_send_cmd_sync(ac, &enc_cfg);
+
+}
+/**
+ * q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @use_default_chmap: flag to use default ch map.
+ * @channel_map: channel map pointer
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ return __q6asm_enc_cfg_blk_pcm(ac, rate, channels, bits_per_sample);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support);
+
+/**
+ * q6asm_read() - read data of period size from audio client
+ *
+ * @ac: audio client pointer
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_read(struct audio_client *ac)
+{
+<<<<<<<
+ struct asm_data_cmd_read_v2 *read;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ struct apr_pkt *pkt;
+ unsigned long flags;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*read);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ read = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.opcode = ASM_DATA_CMD_READ_V2;
+ read->buf_addr_lsw = lower_32_bits(ab->phys);
+ read->buf_addr_msw = upper_32_bits(ab->phys);
+ read->mem_map_handle = port->mem_map_handle;
+
+ read->buf_size = ab->size;
+ read->seq_id = port->dsp_buf;
+ pkt->hdr.token = port->dsp_buf;
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ else
+ pr_err("read op[0x%x]rc[%d]\n", pkt->hdr.opcode, rc);
+
+ kfree(pkt);
+ return rc;
+=======
+ struct asm_data_cmd_read_v2 read;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ q6asm_add_hdr(ac, &read.hdr, sizeof(read), false, ac->stream_id);
+ ab = &port->buf[port->dsp_buf];
+
+ read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+ read.buf_addr_lsw = lower_32_bits(ab->phys);
+ read.buf_addr_msw = upper_32_bits(ab->phys);
+ read.mem_map_handle = ac->port[SNDRV_PCM_STREAM_CAPTURE].mem_map_handle;
+
+ read.buf_size = ab->size;
+ read.seq_id = port->dsp_buf;
+ read.hdr.token = port->dsp_buf;
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ rc = apr_send_pkt(ac->adev, &read);
+ if (rc < 0) {
+ pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+ return rc;
+ }
+
+ return 0;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_read);
+
+static int __q6asm_open_read(struct audio_client *ac,
+ uint32_t format, uint16_t bits_per_sample)
+{
+<<<<<<<
+ struct asm_stream_cmd_open_read_v3 *open;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open->src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+ open->preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_NONE;
+ open->bits_per_sample = bits_per_sample;
+ open->mode_flags = 0x0;
+
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+=======
+ struct asm_stream_cmd_open_read_v3 open;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+ open.preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_NONE;
+ open.bits_per_sample = bits_per_sample;
+ open.mode_flags = 0x0;
+
+ open.mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+>>>>>>>
+ ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+<<<<<<<
+ open->mode_flags |= 0x00;
+ open->enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+=======
+ open.mode_flags |= 0x00;
+ open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+>>>>>>>
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ }
+
+<<<<<<<
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+ kfree(pkt);
+ return rc;
+=======
+ return q6asm_ac_send_cmd_sync(ac, &open);
+>>>>>>>
+}
+
+/**
+ * q6asm_open_read() - Open audio client for reading
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ return __q6asm_open_read(ac, format, bits_per_sample);
+}
+EXPORT_SYMBOL_GPL(q6asm_open_read);
+
+<<<<<<<
+/**
+ * q6asm_write_async() - non blocking write
+ *
+ * @ac: audio client pointer
+ * @len: length in bytes
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ * @wflags: flags associated with write
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t wflags)
+{
+ struct asm_data_cmd_write_v2 *write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ unsigned long flags;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*write);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ write = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.token = port->dsp_buf;
+ pkt->hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+ write->buf_addr_lsw = lower_32_bits(ab->phys);
+ write->buf_addr_msw = upper_32_bits(ab->phys);
+ write->buf_size = len;
+ write->seq_id = port->dsp_buf;
+ write->timestamp_lsw = lsw_ts;
+ write->timestamp_msw = msw_ts;
+ write->mem_map_handle =
+ ac->port[SNDRV_PCM_STREAM_PLAYBACK].mem_map_handle;
+
+ if (wflags == NO_TIMESTAMP)
+ write->flags = (wflags & 0x800000FF);
+ else
+ write->flags = (0x80000000 | wflags);
+=======
+int q6asm_write_async_v2(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct asm_data_cmd_write_v2 write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc = 0;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &write.hdr, sizeof(write), false,
+ ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+
+ write.hdr.token = port->dsp_buf;
+ write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+ write.buf_addr_lsw = lower_32_bits(ab->phys);
+ write.buf_addr_msw = upper_32_bits(ab->phys);
+ write.buf_size = len;
+ write.seq_id = port->dsp_buf;
+ write.timestamp_lsw = lsw_ts;
+ write.timestamp_msw = msw_ts;
+ write.mem_map_handle =
+ ac->port[SNDRV_PCM_STREAM_PLAYBACK].mem_map_handle;
+
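+ /* Bit 31 of the write flags appears to mark the timestamps as valid:
+  * it is masked off when NO_TIMESTAMP is passed and set otherwise
+  * (inferred from the masks used below, not from firmware documentation).
+  */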
+ if (flags == NO_TIMESTAMP)
+ write.flags = (flags & 0x800000FF);
+ else
+ write.flags = (0x80000000 | flags);
+>>>>>>>
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+<<<<<<<
+ rc = apr_send_pkt(ac->adev, &write);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int q6asm_write_nolock_v1(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct asm_stream_cmd_write_v1 write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc = 0;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &write.hdr, sizeof(write), false,
+ ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+
+ write.hdr.token = port->dsp_buf;
+ write.hdr.opcode = ASM_DATA_CMD_WRITE;
+ write.buf_add = lower_32_bits(ab->phys);
+ write.avail_bytes = len;
+ write.uid = port->dsp_buf;
+ write.lsw_ts = lsw_ts;
+ write.msw_ts = msw_ts;
+
+ if (flags == NO_TIMESTAMP)
+ write.uflags = (flags & 0x800000FF);
+ else
+ write.uflags = (0x80000000 | flags);
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ rc = apr_send_pkt(ac->adev, &write);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+/**
+ * q6asm_write_async() - non blocking write
+ *
+ * @ac: audio client pointer
+ * @len: length in bytes
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ * @flags: flags associated with write
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->write_nolock(ac, len, msw_ts, lsw_ts, flags);
+
+=======
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+
+ kfree(pkt);
+ return rc;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_write_async);
+
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+<<<<<<<
+ int cnt = 0;
+ int loopcnt = 0;
+ int used;
+ struct audio_port_data *port = NULL;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return;
+
+ used = (ac->io_mode & ASM_TUN_WRITE_IO_MODE ? 1 : 0);
+ mutex_lock(&ac->lock);
+ for (loopcnt = 0; loopcnt <= SNDRV_PCM_STREAM_CAPTURE;
+ loopcnt++) {
+ port = &ac->port[loopcnt];
+ cnt = port->num_periods - 1;
+ port->dsp_buf = 0;
+ while (cnt >= 0) {
+ if (!port->buf)
+ continue;
+ port->buf[cnt].used = used;
+ cnt--;
+ }
+ }
+ mutex_unlock(&ac->lock);
+=======
+ struct audio_port_data *port = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ port->dsp_buf = 0;
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ port->dsp_buf = 0;
+ spin_unlock_irqrestore(&ac->lock, flags);
+>>>>>>>
+}
+
+static int __q6asm_cmd(struct audio_client *ac, int cmd, bool wait)
+{
+ int stream_id = ac->stream_id;
+<<<<<<<
+ struct apr_hdr hdr;
+ int rc;
+
+ q6asm_add_hdr(ac, &hdr, sizeof(hdr), true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ hdr.opcode = ASM_STREAM_CMD_CLOSE;
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ q6asm_add_hdr(ac, &pkt.hdr, APR_HDR_SIZE, true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ pkt.hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ pkt.hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ pkt.hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ pkt.hdr.opcode = ASM_STREAM_CMD_CLOSE;
+>>>>>>>
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (wait)
+<<<<<<<
+ rc = q6asm_ac_send_cmd_sync(ac, &hdr);
+ else
+ return apr_send_pkt(ac->adev, &hdr);
+=======
+ rc = q6asm_ac_send_cmd_sync(ac, &pkt);
+ else
+ return apr_send_pkt(ac->adev, &pkt);
+>>>>>>>
+
+ if (rc < 0)
+ return rc;
+
+ if (cmd == CMD_FLUSH)
+ q6asm_reset_buf_state(ac);
+
+ return 0;
+}
+
+<<<<<<<
+=======
+static int __q6asm_cmd_v1(struct audio_client *ac, int cmd, bool wait)
+{
+ int stream_id = ac->stream_id;
+ struct apr_hdr hdr;
+ int rc;
+
+ q6asm_add_hdr(ac, &hdr, sizeof(hdr), true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ hdr.opcode = ASM_STREAM_CMD_CLOSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (wait)
+ rc = q6asm_ac_send_cmd_sync(ac, &hdr);
+ else
+ return apr_send_pkt(ac->adev, &hdr);
+
+ if (rc < 0)
+ return rc;
+
+ if (cmd == CMD_FLUSH)
+ q6asm_reset_buf_state(ac);
+
+ return 0;
+}
+>>>>>>>
+/**
+ * q6asm_cmd() - run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+<<<<<<<
+ return __q6asm_cmd(ac, cmd, true);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->cmd(ac, cmd, true);
+
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd);
+
+/**
+ * q6asm_cmd_nowait() - non blocking, run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+<<<<<<<
+ return __q6asm_cmd(ac, cmd, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
+
+static int q6asm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+ struct q6asm *q6asm;
+
+ q6asm = devm_kzalloc(dev, sizeof(*q6asm), GFP_KERNEL);
+ if (!q6asm)
+ return -ENOMEM;
+
+ q6core_get_svc_api_info(adev->svc_id, &q6asm->ainfo);
+
+ q6asm->dev = dev;
+ q6asm->adev = adev;
+ init_waitqueue_head(&q6asm->mem_wait);
+ spin_lock_init(&q6asm->slock);
+ dev_set_drvdata(dev, q6asm);
+
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ q6asm->pdev_dais = of_platform_device_create(dais_np,
+ "q6asm-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->cmd(ac, cmd, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
+
+struct q6asm_ops q6asm_v1_ops = {
+ .memory_unmap = __q6asm_memory_unmap_v1,
+ .memory_map_regions = __q6asm_memory_map_regions_v1,
+ .open_write = q6asm_open_write_v1,
+ .run = __q6asm_run_v1,
+ .format_block_multi_ch_pcm = q6asm_media_format_block_multi_ch_pcm_v1,
+ .write_nolock = q6asm_write_nolock_v1,
+ .cmd = __q6asm_cmd_v1,
+};
+
+struct q6asm_ops q6asm_v2_ops = {
+ .memory_unmap = __q6asm_memory_unmap_v2,
+ .memory_map_regions = __q6asm_memory_map_regions_v2,
+ .open_write = q6asm_open_write_v3,
+ .run = __q6asm_run_v2,
+ .format_block_multi_ch_pcm = q6asm_media_format_block_multi_ch_pcm_v2,
+ .write_nolock = q6asm_write_async_v2,
+ .cmd = __q6asm_cmd,
+};
+
+static int q6asm_probe(struct apr_device *adev)
+{
+ struct q6asm *q6asm;
+
+ q6asm = devm_kzalloc(&adev->dev, sizeof(*q6asm), GFP_KERNEL);
+ if (!q6asm)
+ return -ENOMEM;
+
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
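+ /*
+  * Select the command set from the ADSP ASM service version reported by
+  * q6core: major version 7 and later is handled with the V2/V3 opcodes,
+  * major version 0 with the legacy V1 opcodes (as implemented by the
+  * dispatch below).
+  */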
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x7)
+ q6asm->ops = &q6asm_v2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ q6asm->ops = &q6asm_v1_ops;
+
+ if (!q6asm->ops) {
+ dev_err(&adev->dev, "Unsupported ASM version\n");
+ return -EINVAL;
+ }
+
+ q6asm->dev = &adev->dev;
+ q6asm->adev = adev;
+ q6asm->mem_state = 0;
+ init_waitqueue_head(&q6asm->mem_wait);
+ mutex_init(&q6asm->session_lock);
+ dev_set_drvdata(&adev->dev, q6asm);
+
+ return q6asm_dai_probe(&adev->dev);
+>>>>>>>
+}
+
+static int q6asm_remove(struct apr_device *adev)
+{
+<<<<<<<
+ return q6asm_dai_remove(&adev->dev);
+}
+
+=======
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+
+ if (q6asm->pdev_dais)
+ of_platform_device_destroy(&q6asm->pdev_dais->dev, NULL);
+
+ return 0;
+}
+>>>>>>>
+static const struct of_device_id q6asm_device_id[] = {
+ { .compatible = "qcom,q6asm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6asm_device_id);
+
+static struct apr_driver qcom_q6asm_driver = {
+ .probe = q6asm_probe,
+ .remove = q6asm_remove,
+ .callback = q6asm_srvc_callback,
+ .driver = {
+ .name = "qcom-q6asm",
+ .of_match_table = of_match_ptr(q6asm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6asm_driver);
+MODULE_DESCRIPTION("Q6 Audio Stream Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/thisimage b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/thisimage
new file mode 100644
index 0000000..a7c06a1
--- /dev/null
+++ b/rr-cache/c9867e004c11ba39a2594258c2956921203f991b/thisimage
@@ -0,0 +1,2820 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/device.h>
+<<<<<<<
+#include <linux/of.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+>>>>>>>
+#include <uapi/sound/asound.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "q6asm.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ASM_STREAM_CMD_CLOSE 0x00010BCD
+#define ASM_STREAM_CMD_FLUSH 0x00010BCE
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_DATA_CMD_EOS 0x00010BDB
+<<<<<<<
+#define ASM_DEFAULT_POPP_TOPOLOGY 0x00010BE4
+=======
+#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
+>>>>>>>
+#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+#define ASM_STREAM_POSTPROC_TOPO_ID_NONE 0x00010C68
+#define ASM_CMD_SHARED_MEM_MAP_REGIONS 0x00010D92
+#define ASM_CMDRSP_SHARED_MEM_MAP_REGIONS 0x00010D93
+#define ASM_CMD_SHARED_MEM_UNMAP_REGIONS 0x00010D94
+#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
+#define ASM_DATA_EVENT_WRITE_DONE_V2 0x00010D99
+#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
+#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
+#define ASM_DATA_CMD_READ_V2 0x00010DAC
+#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
+#define ASM_STREAM_CMD_OPEN_WRITE_V3 0x00010DB3
+#define ASM_STREAM_CMD_OPEN_READ_V3 0x00010DB4
+#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
+#define ASM_STREAM_CMD_OPEN_READWRITE_V2 0x00010D8D
+
+
+#define ASM_LEGACY_STREAM_SESSION 0
+/* Bit shift for the stream_perf_mode subfield. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ 29
+#define ASM_END_POINT_DEVICE_MATRIX 0
+#define ASM_DEFAULT_APP_TYPE 0
+#define ASM_SYNC_IO_MODE 0x0001
+#define ASM_ASYNC_IO_MODE 0x0002
+#define ASM_TUN_READ_IO_MODE 0x0004 /* tunnel read write mode */
+#define ASM_TUN_WRITE_IO_MODE 0x0008 /* tunnel read write mode */
+#define ASM_SHIFT_GAPLESS_MODE_FLAG 31
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
+
+struct avs_cmd_shared_mem_map_regions {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 mem_pool_id;
+ u16 num_regions;
+ u32 property_flag;
+} __packed;
+
+struct avs_shared_map_region_payload {
+ u32 shm_addr_lsw;
+ u32 shm_addr_msw;
+ u32 mem_size_bytes;
+} __packed;
+
+struct avs_cmd_shared_mem_unmap_regions {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 mem_map_handle;
+} __packed;
+
+struct asm_data_cmd_media_fmt_update_v2 {
+ u32 fmt_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 num_channels;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 reserved;
+<<<<<<<
+ u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+=======
+ u8 channel_mapping[PCM_MAX_NUM_CHANNEL];
+>>>>>>>
+} __packed;
+
+struct asm_stream_cmd_set_encdec_param {
+ u32 param_id;
+ u32 param_size;
+} __packed;
+
+struct asm_enc_cfg_blk_param_v2 {
+ u32 frames_per_buf;
+ u32 enc_cfg_blk_size;
+} __packed;
+
+struct asm_multi_channel_pcm_enc_cfg_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ struct asm_stream_cmd_set_encdec_param encdec;
+ struct asm_enc_cfg_blk_param_v2 encblk;
+ uint16_t num_channels;
+ uint16_t bits_per_sample;
+ uint32_t sample_rate;
+ uint16_t is_signed;
+ uint16_t reserved;
+ uint8_t channel_mapping[8];
+} __packed;
+
+struct asm_data_cmd_read_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+} __packed;
+
+struct asm_data_cmd_read_v2_done {
+ u32 status;
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+};
+
+struct asm_stream_cmd_open_read_v3 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 mode_flags;
+ u32 src_endpointype;
+ u32 preprocopo_id;
+ u32 enc_cfg_id;
+ u16 bits_per_sample;
+ u16 reserved;
+} __packed;
+
+struct asm_data_cmd_write_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 buf_addr_lsw;
+ u32 buf_addr_msw;
+ u32 mem_map_handle;
+ u32 buf_size;
+ u32 seq_id;
+ u32 timestamp_lsw;
+ u32 timestamp_msw;
+ u32 flags;
+} __packed;
+
+struct asm_stream_cmd_open_write_v3 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ uint32_t mode_flags;
+ uint16_t sink_endpointype;
+ uint16_t bits_per_sample;
+ uint32_t postprocopo_id;
+ uint32_t dec_fmt_id;
+} __packed;
+
+struct asm_session_cmd_run_v2 {
+<<<<<<<
+=======
+ struct apr_hdr hdr;
+>>>>>>>
+ u32 flags;
+ u32 time_lsw;
+ u32 time_msw;
+} __packed;
+
+struct audio_buffer {
+ phys_addr_t phys;
+<<<<<<<
+=======
+ uint32_t used;
+>>>>>>>
+ uint32_t size; /* size of buffer */
+};
+
+struct audio_port_data {
+ struct audio_buffer *buf;
+ uint32_t num_periods;
+ uint32_t dsp_buf;
+ uint32_t mem_map_handle;
+};
+
+<<<<<<<
+struct audio_client {
+ int session;
+ q6asm_cb cb;
+ int cmd_state;
+ void *priv;
+ uint32_t io_mode;
+ struct apr_device *adev;
+ struct mutex lock;
+ /* idx:1 out port, 0: in port */
+ struct audio_port_data port[2];
+ wait_queue_head_t cmd_wait;
+ int perf_mode;
+ int stream_id;
+ struct device *dev;
+};
+
+struct q6asm_ops {
+ int (*memory_unmap)(struct audio_client *ac,
+ phys_addr_t buf_add, int dir);
+ int (*memory_map_regions)(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous);
+ int (*open_write)(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+ int (*run)(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait);
+ int (*format_block_multi_ch_pcm)(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample);
+ int (*write_nolock)(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+ int (*cmd)(struct audio_client *ac, int cmd, bool wait);
+};
+
+
+struct q6asm {
+ struct apr_device *adev;
+ int mem_state;
+ struct device *dev;
+ wait_queue_head_t mem_wait;
+ struct mutex session_lock;
+ struct platform_device *pcmdev;
+ struct audio_client *session[MAX_SESSIONS + 1];
+ void *dai_data;
+ struct q6asm_ops *ops;
+};
+
+/****** V1 ***/
+/* bit 0:1 represents priority of stream */
+#define STREAM_PRIORITY_NORMAL 0x0000
+#define STREAM_PRIORITY_LOW 0x0001
+#define STREAM_PRIORITY_HIGH 0x0002
+
+/* Supported formats */
+#define LINEAR_PCM 0x00010BE5
+
+#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA
+#define ASM_STREAM_CMD_OPEN_WRITE_V2_1 0x00010DB1
+struct asm_stream_cmd_open_write_v1 {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u16 sink_endpoint;
+ u16 stream_handle;
+ u32 post_proc_top;
+ u32 format;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC
+#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY 0x00010BDE
+
+struct asm_pcm_cfg {
+ u16 ch_cfg;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 interleaved;
+};
+
+struct asm_stream_media_format_update_v1{
+ struct apr_hdr hdr;
+ u32 format;
+ u32 cfg_size;
+ union {
+ struct asm_pcm_cfg pcm_cfg;
+ } __attribute__((packed)) write_cfg;
+} __attribute__((packed));
+
+
+#define ASM_DATA_CMD_WRITE 0x00010BD9
+struct asm_stream_cmd_write_v1{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 avail_bytes;
+ u32 uid;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 uflags;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_WRITE_DONE 0x00010BDF
+struct asm_data_event_write_done{
+ u32 buf_add;
+ u32 status;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_RUN 0x00010BD2
+struct asm_stream_cmd_run_v1{
+ struct apr_hdr hdr;
+ u32 flags;
+ u32 msw_ts;
+ u32 lsw_ts;
+} __attribute__((packed));
+
+/* Session Level commands */
+#define ASM_SESSION_CMD_MEMORY_MAP 0x00010C32
+struct asm_stream_cmd_memory_map{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u16 mempool_id;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP 0x00010C33
+struct asm_stream_cmd_memory_unmap{
+ struct apr_hdr hdr;
+ u32 buf_add;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS 0x00010C45
+struct asm_memory_map_regions{
+ u32 phys;
+ u32 buf_size;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_map_regions{
+ struct apr_hdr hdr;
+ u16 mempool_id;
+ u16 nregions;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS 0x00010C46
+struct asm_memory_unmap_regions{
+ u32 phys;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_unmap_regions{
+ struct apr_hdr hdr;
+ u16 nregions;
+ u16 reserved;
+} __attribute__((packed));
+
+/**********/
+static bool q6asm_is_valid_audio_client(struct audio_client *ac)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int n;
+
+ if (!ac)
+ return false;
+
+ for (n = 1; n <= MAX_SESSIONS; n++) {
+ if (a->session[n] == ac)
+ return true;
+ }
+
+ return false;
+}
+
+=======
+struct q6asm {
+ struct apr_device *adev;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ wait_queue_head_t mem_wait;
+ struct platform_device *pcmdev;
+ spinlock_t slock;
+ struct audio_client *session[MAX_SESSIONS + 1];
+ struct platform_device *pdev_dais;
+};
+
+struct audio_client {
+ int session;
+ q6asm_cb cb;
+ void *priv;
+ uint32_t io_mode;
+ struct apr_device *adev;
+ struct mutex cmd_lock;
+ spinlock_t lock;
+ struct kref refcount;
+ /* idx:1 out port, 0: in port */
+ struct audio_port_data port[2];
+ wait_queue_head_t cmd_wait;
+ struct aprv2_ibasic_rsp_result_t result;
+ int perf_mode;
+ int stream_id;
+ struct q6asm *q6asm;
+ struct device *dev;
+};
+
+>>>>>>>
+static inline void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, bool cmd_flg,
+ uint32_t stream_id)
+{
+ hdr->hdr_field = APR_SEQ_CMD_HDR_FIELD;
+<<<<<<<
+=======
+ hdr->src_svc = ac->adev->svc_id;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->dest_svc = APR_SVC_ASM;
+ hdr->dest_domain = APR_DOMAIN_ADSP;
+>>>>>>>
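+ /* The APR port packs the session number into the upper byte and the
+  * stream id into the lower byte; q6asm_srvc_callback() relies on the
+  * same encoding to route responses back to the owning client.
+  */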
+ hdr->src_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+ hdr->pkt_size = pkt_size;
+ if (cmd_flg)
+ hdr->token = ac->session;
+}
+
+static int q6asm_apr_send_session_pkt(struct q6asm *a, struct audio_client *ac,
+<<<<<<<
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+ rc = apr_send_pkt(a->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ if (rsp_opcode)
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode) ||
+ (ac->result.opcode == rsp_opcode),
+ 5 * HZ);
+ else
+ rc = wait_event_timeout(a->mem_wait,
+ (ac->result.opcode == hdr->opcode),
+ 5 * HZ);
+
+ if (!rc) {
+ dev_err(a->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ } else if (ac->result.status > 0) {
+ dev_err(a->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+static int __q6asm_memory_unmap(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct avs_cmd_shared_mem_unmap_regions *mem_unmap;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct apr_pkt *pkt;
+ int rc, pkt_size;
+ void *p;
+=======
+ void *data)
+{
+ int rc;
+
+ mutex_lock(&a->session_lock);
+ a->mem_state = 1;
+ rc = apr_send_pkt(a->adev, data);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(a->mem_wait, (a->mem_state <= 0), 5 * HZ);
+ if (!rc) {
+ dev_err(a->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ } else if (a->mem_state < 0) {
+ rc = q6dsp_errno(a->mem_state);
+ }
+
+err:
+ mutex_unlock(&a->session_lock);
+ return rc;
+}
+
+static int __q6asm_memory_unmap_v2(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int rc;
+>>>>>>>
+
+ if (ac->port[dir].mem_map_handle == 0) {
+ dev_err(ac->dev, "invalid mem handle\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ mem_unmap.hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ mem_unmap.hdr.src_port = 0;
+ mem_unmap.hdr.dest_port = 0;
+ mem_unmap.hdr.pkt_size = sizeof(mem_unmap);
+ mem_unmap.hdr.token = ((ac->session << 8) | dir);
+
+ mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mem_unmap.mem_map_handle = ac->port[dir].mem_map_handle;
+
+ rc = q6asm_apr_send_session_pkt(a, ac, &mem_unmap);
+ if (rc < 0)
+ return rc;
+
+ ac->port[dir].mem_map_handle = 0;
+
+ return 0;
+}
+
+static int __q6asm_memory_unmap_v1(struct audio_client *ac,
+ phys_addr_t buf_add, int dir)
+{
+ struct asm_stream_cmd_memory_unmap_regions *mem_unmap;
+ struct asm_memory_unmap_regions *mregions = NULL;
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *b;
+
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ int rc, cmd_size, bufcnt, i;
+
+ port = &ac->port[dir];
+ bufcnt = port->num_periods;
+
+ if (ac->port[dir].mem_map_handle == 0) {
+ dev_err(ac->dev, "invalid mem handle\n");
+ return -EINVAL;
+ }
+
+ cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) +
+ sizeof(struct asm_memory_unmap_regions) * bufcnt;
+
+ b = kzalloc(cmd_size, GFP_KERNEL);
+ mem_unmap = b;
+ if (!mem_unmap)
+ return -ENOMEM;
+
+
+ mem_unmap->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ mem_unmap->hdr.src_port = 0;
+ mem_unmap->hdr.dest_port = 0;
+ mem_unmap->hdr.pkt_size = cmd_size;
+ mem_unmap->hdr.token = ((ac->session << 8) | dir);
+
+ mem_unmap->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS;
+ mem_unmap->nregions = bufcnt & 0x00ff;
+
+ mregions = b + sizeof(*mem_unmap);
+
+ for (i = 0; i < bufcnt; i++) {
+ ab = &port->buf[i];
+ mregions->phys = lower_32_bits(ab->phys);
+ ++mregions;
+ }
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mem_unmap);
+ if (rc < 0)
+ return rc;
+
+ ac->port[dir].mem_map_handle = 0;
+
+ kfree(b);
+ return 0;
+=======
+ pkt_size = APR_HDR_SIZE + sizeof(*mem_unmap);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ mem_unmap = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mem_unmap->mem_map_handle = ac->port[dir].mem_map_handle;
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt, 0);
+ if (rc < 0) {
+ kfree(pkt);
+ return rc;
+ }
+
+ ac->port[dir].mem_map_handle = 0;
+
+ kfree(pkt);
+ return 0;
+}
+
+
+static void q6asm_audio_client_free_buf(struct audio_client *ac,
+ struct audio_port_data *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port->num_periods = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+ spin_unlock_irqrestore(&ac->lock, flags);
+>>>>>>>
+}
+
+/**
+ * q6asm_unmap_memory_regions() - unmap memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac)
+{
+<<<<<<<
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+>>>>>>>
+ struct audio_port_data *port;
+ int cnt = 0;
+ int rc = 0;
+
+<<<<<<<
+=======
+ mutex_lock(&ac->lock);
+>>>>>>>
+ port = &ac->port[dir];
+ if (!port->buf) {
+ rc = -EINVAL;
+ goto err;
+ }
+<<<<<<<
+ cnt = port->num_periods - 1;
+ if (cnt >= 0) {
+ a->ops->memory_unmap(ac, port->buf[dir].phys, dir);
+=======
+
+ cnt = port->num_periods - 1;
+ if (cnt >= 0) {
+ rc = __q6asm_memory_unmap(ac, port->buf[dir].phys, dir);
+>>>>>>>
+ if (rc < 0) {
+ dev_err(ac->dev, "%s: Memory_unmap_regions failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+ }
+
+<<<<<<<
+ port->num_periods = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+
+err:
+ mutex_unlock(&ac->lock);
+=======
+ q6asm_audio_client_free_buf(ac, port);
+
+err:
+>>>>>>>
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_unmap_memory_regions);
+
+<<<<<<<
+static int __q6asm_memory_map_regions(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct avs_cmd_shared_mem_map_regions *cmd = NULL;
+ struct avs_shared_map_region_payload *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev->parent);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ struct apr_pkt *pkt;
+ void *p;
+ unsigned long flags;
+ uint32_t num_regions, buf_sz;
+ int rc, i, pkt_size;
+
+ if (is_contiguous) {
+ num_regions = 1;
+ buf_sz = period_sz * periods;
+ } else {
+ buf_sz = period_sz;
+ num_regions = periods;
+ }
+
+ /* DSP expects size should be aligned to 4K */
+ buf_sz = ALIGN(buf_sz, 4096);
+
+ pkt_size = APR_HDR_SIZE + sizeof(*cmd) +
+ (sizeof(*mregions) * num_regions);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ cmd = p + APR_HDR_SIZE;
+ mregions = p + APR_HDR_SIZE + sizeof(*cmd);
+
+ pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = ((ac->session << 8) | dir);
+ pkt->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+
+=======
+static int __q6asm_memory_map_regions_v1(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct asm_stream_cmd_memory_map_regions *cmd = NULL;
+ struct asm_memory_map_regions *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *mmap_region_cmd = NULL;
+ uint32_t num_regions, buf_sz;
+ int rc, i, cmd_size;
+
+ num_regions = is_contiguous ? 1 : periods;
+ buf_sz = is_contiguous ? (period_sz * periods) : period_sz;
+ buf_sz = PAGE_ALIGN(buf_sz);
+
+ cmd_size = sizeof(*cmd) + (sizeof(*mregions) * num_regions);
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd)
+ return -ENOMEM;
+
+ cmd = mmap_region_cmd;
+
+ cmd->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ cmd->hdr.src_port = 0;
+ cmd->hdr.dest_port = 0;
+ cmd->hdr.pkt_size = cmd_size;
+ cmd->hdr.token = ((ac->session << 8) | dir);
+
+
+ cmd->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS;
+ cmd->mempool_id = 0;
+ cmd->nregions = num_regions;
+
+ mregions = mmap_region_cmd + sizeof(*cmd);
+
+ port = &ac->port[dir];
+
+ for (i = 0; i < num_regions; i++) {
+ ab = &port->buf[i];
+ mregions->phys = lower_32_bits(ab->phys);
+ mregions->buf_size = buf_sz;
+ ++mregions;
+ }
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mmap_region_cmd);
+
+ kfree(mmap_region_cmd);
+
+ return rc;
+}
+
+static int __q6asm_memory_map_regions_v2(struct audio_client *ac, int dir,
+ size_t period_sz, unsigned int periods,
+ bool is_contiguous)
+{
+ struct avs_cmd_shared_mem_map_regions *cmd = NULL;
+ struct avs_shared_map_region_payload *mregions = NULL;
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_port_data *port = NULL;
+ struct audio_buffer *ab = NULL;
+ void *mmap_region_cmd = NULL;
+ uint32_t num_regions, buf_sz;
+ int rc, i, cmd_size;
+
+ num_regions = is_contiguous ? 1 : periods;
+ buf_sz = is_contiguous ? (period_sz * periods) : period_sz;
+ buf_sz = PAGE_ALIGN(buf_sz);
+
+ cmd_size = sizeof(*cmd) + (sizeof(*mregions) * num_regions);
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd)
+ return -ENOMEM;
+
+ cmd = mmap_region_cmd;
+
+ cmd->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
+ cmd->hdr.src_port = 0;
+ cmd->hdr.dest_port = 0;
+ cmd->hdr.pkt_size = cmd_size;
+ cmd->hdr.token = ((ac->session << 8) | dir);
+
+
+ cmd->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+>>>>>>>
+ cmd->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ cmd->num_regions = num_regions;
+ cmd->property_flag = 0x00;
+
+<<<<<<<
+ mregions = mmap_region_cmd + sizeof(*cmd);
+
+=======
+ spin_lock_irqsave(&ac->lock, flags);
+>>>>>>>
+ port = &ac->port[dir];
+
+ for (i = 0; i < num_regions; i++) {
+ ab = &port->buf[i];
+ mregions->shm_addr_lsw = lower_32_bits(ab->phys);
+ mregions->shm_addr_msw = upper_32_bits(ab->phys);
+ mregions->mem_size_bytes = buf_sz;
+ ++mregions;
+ }
+<<<<<<<
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = q6asm_apr_send_session_pkt(a, ac, pkt,
+ ASM_CMDRSP_SHARED_MEM_MAP_REGIONS);
+
+ kfree(pkt);
+=======
+
+ rc = q6asm_apr_send_session_pkt(a, ac, mmap_region_cmd);
+
+ kfree(mmap_region_cmd);
+>>>>>>>
+
+ return rc;
+}
+
+/**
+ * q6asm_map_memory_regions() - map memory regions in the dsp.
+ *
+ * @dir: direction of audio stream
+ * @ac: audio client instance
+ * @phys: physical address that needs mapping.
+ * @period_sz: audio period size
+ * @periods: number of periods
+ *
+ * Return: Will be a negative value on failure or zero on success
+ */
+int q6asm_map_memory_regions(unsigned int dir, struct audio_client *ac,
+ phys_addr_t phys,
+ size_t period_sz, unsigned int periods)
+{
+<<<<<<<
+ struct audio_buffer *buf;
+ unsigned long flags;
+ int cnt;
+ int rc;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ if (ac->port[dir].buf) {
+ dev_err(ac->dev, "Buffer already allocated\n");
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return 0;
+ }
+
+ buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_ATOMIC);
+ if (!buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ return -ENOMEM;
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+ struct audio_buffer *buf;
+ int cnt;
+ int rc;
+
+ mutex_lock(&ac->lock);
+ if (ac->port[dir].buf) {
+ dev_err(ac->dev, "Buffer already allocated\n");
+ rc = 0;
+ goto err;
+ }
+
+
+ buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto err;
+>>>>>>>
+ }
+
+
+ ac->port[dir].buf = buf;
+
+ buf[0].phys = phys;
+<<<<<<<
+=======
+ buf[0].used = !!dir;
+>>>>>>>
+ buf[0].size = period_sz;
+
+ for (cnt = 1; cnt < periods; cnt++) {
+ if (period_sz > 0) {
+ buf[cnt].phys = buf[0].phys + (cnt * period_sz);
+<<<<<<<
+ buf[cnt].size = period_sz;
+ }
+ }
+ ac->port[dir].num_periods = periods;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ rc = __q6asm_memory_map_regions(ac, dir, period_sz, periods, 1);
+ if (rc < 0) {
+ dev_err(ac->dev, "Memory_map_regions failed\n");
+ q6asm_audio_client_free_buf(ac, &ac->port[dir]);
+ }
+
+=======
+ buf[cnt].used = dir ^ 1;
+ buf[cnt].size = period_sz;
+ }
+ }
+
+ ac->port[dir].num_periods = periods;
+ rc = a->ops->memory_map_regions(ac, dir, period_sz, periods, 1);
+ if (rc < 0) {
+ dev_err(ac->dev, "Memory_map_regions failed\n");
+ ac->port[dir].num_periods = 0;
+ kfree(buf);
+ ac->port[dir].buf = NULL;
+ goto err;
+ }
+
+err:
+ mutex_unlock(&ac->lock);
+>>>>>>>
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_map_memory_regions);
+
+<<<<<<<
+=======
+static void q6asm_audio_client_release(struct kref *ref)
+{
+ struct audio_client *ac;
+ struct q6asm *a;
+ unsigned long flags;
+
+ ac = container_of(ref, struct audio_client, refcount);
+ a = ac->q6asm;
+
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[ac->session] = NULL;
+ spin_unlock_irqrestore(&a->slock, flags);
+
+ kfree(ac);
+}
+
+>>>>>>>
+/**
+ * q6asm_audio_client_free() - Free allocated audio client
+ *
+ * @ac: audio client to free
+ */
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+<<<<<<<
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ mutex_lock(&a->session_lock);
+ a->session[ac->session] = NULL;
+ mutex_unlock(&a->session_lock);
+ kfree(ac);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_free);
+
+static struct audio_client *q6asm_get_audio_client(struct q6asm *a,
+ int session_id)
+{
+<<<<<<<
+ if ((session_id <= 0) || (session_id > MAX_SESSIONS)) {
+ dev_err(a->dev, "invalid session: %d\n", session_id);
+ return NULL;
+ }
+
+ if (!a->session[session_id]) {
+ dev_err(a->dev, "session not active: %d\n", session_id);
+ return NULL;
+ }
+
+ return a->session[session_id];
+}
+
+/**
+ * q6asm_set_dai_data() - set dai private data
+ *
+ * @dev: Pointer to asm device.
+ * @data: dai private data
+ *
+ */
+void q6asm_set_dai_data(struct device *dev, void *data)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+
+ a->dai_data = data;
+}
+EXPORT_SYMBOL_GPL(q6asm_set_dai_data);
+
+/**
+ * q6asm_get_dai_data() - get dai private data
+ *
+ * @dev: Pointer to asm device.
+ *
+ * Return: pointer to dai private data
+ */
+void *q6asm_get_dai_data(struct device *dev)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+
+ return a->dai_data;
+}
+EXPORT_SYMBOL_GPL(q6asm_get_dai_data);
+
+static int32_t q6asm_stream_callback(struct apr_device *adev,
+ struct apr_client_message *data,
+=======
+ struct audio_client *ac = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->slock, flags);
+ if ((session_id <= 0) || (session_id > MAX_SESSIONS)) {
+ dev_err(a->dev, "invalid session: %d\n", session_id);
+ goto err;
+ }
+
+ /* check for valid session */
+ if (!a->session[session_id])
+ goto err;
+ else if (a->session[session_id]->session != session_id)
+ goto err;
+
+ ac = a->session[session_id];
+ kref_get(&ac->refcount);
+err:
+ spin_unlock_irqrestore(&a->slock, flags);
+ return ac;
+}
+
+static int32_t q6asm_stream_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data,
+>>>>>>>
+ int session_id)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+<<<<<<<
+ struct apr_hdr *hdr = &data->hdr;
+ struct audio_port_data *port;
+ struct audio_client *ac;
+ uint32_t client_event = 0;
+ int ret = 0;
+=======
+ struct audio_port_data *port;
+ struct audio_client *ac;
+ uint32_t token;
+ uint32_t client_event = 0;
+>>>>>>>
+
+ ac = q6asm_get_audio_client(q6asm, session_id);
+ if (!ac)/* Audio client might already be freed by now */
+ return 0;
+
+<<<<<<<
+ if (!q6asm_is_valid_audio_client(ac))
+ return -EINVAL;
+
+ result = data->payload;
+
+ switch (data->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ token = data->token;
+=======
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+>>>>>>>
+ switch (result->opcode) {
+ case ASM_SESSION_CMD_PAUSE:
+ client_event = ASM_CLIENT_EVENT_CMD_PAUSE_DONE;
+ break;
+ case ASM_SESSION_CMD_SUSPEND:
+ client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
+ break;
+ case ASM_DATA_CMD_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+ break;
+<<<<<<<
+ break;
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+ case ASM_SESSION_CMD_RUN:
+ case ASM_SESSION_CMD_RUN_V2:
+ client_event = ASM_CLIENT_EVENT_CMD_RUN_DONE;
+ break;
+
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ if (token != ac->session) {
+ dev_err(ac->dev, "session invalid\n");
+ return -EINVAL;
+ }
+ case ASM_STREAM_CMD_CLOSE:
+ client_event = ASM_CLIENT_EVENT_CMD_CLOSE_DONE;
+ break;
+ case ASM_STREAM_CMD_OPEN_WRITE:
+ case ASM_STREAM_CMD_OPEN_WRITE_V2_1:
+=======
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+ case ASM_SESSION_CMD_RUN_V2:
+ client_event = ASM_CLIENT_EVENT_CMD_RUN_DONE;
+ break;
+ case ASM_STREAM_CMD_CLOSE:
+ client_event = ASM_CLIENT_EVENT_CMD_CLOSE_DONE;
+ break;
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ client_event = ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE;
+ break;
+>>>>>>>
+ case ASM_STREAM_CMD_OPEN_WRITE_V3:
+ case ASM_STREAM_CMD_OPEN_READ_V3:
+ case ASM_STREAM_CMD_OPEN_READWRITE_V2:
+ case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+<<<<<<<
+=======
+ case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+>>>>>>>
+ case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+ if (result->status != 0) {
+ dev_err(ac->dev,
+ "cmd = 0x%x returned error = 0x%x\n",
+ result->opcode, result->status);
+<<<<<<<
+ ac->cmd_state = -result->status;
+ wake_up(&ac->cmd_wait);
+ return 0;
+=======
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+ ret = 0;
+ goto done;
+>>>>>>>
+ }
+ break;
+ default:
+ dev_err(ac->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+
+<<<<<<<
+ ac->result = *result;
+ wake_up(&ac->cmd_wait);
+
+ if (ac->cb)
+ ac->cb(client_event, hdr->token,
+ data->payload, ac->priv);
+
+ ret = 0;
+ goto done;
+
+ case ASM_DATA_EVENT_WRITE_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+=======
+ if (ac->cmd_state) {
+ ac->cmd_state = 0;
+ wake_up(&ac->cmd_wait);
+ }
+ if (ac->cb)
+ ac->cb(client_event, data->token,
+ data->payload, ac->priv);
+
+ return 0;
+
+ case ASM_DATA_EVENT_WRITE_DONE:
+ case ASM_DATA_EVENT_WRITE_DONE_V2:
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+
+ client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE;
+
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys = port->buf[data->token].phys;
+>>>>>>>
+
+ if (lower_32_bits(phys) != result->opcode ||
+ upper_32_bits(phys) != result->status) {
+ dev_err(ac->dev, "Expected addr %pa\n",
+<<<<<<<
+ &port->buf[data->token].phys);
+ return -EINVAL;
+ }
+ token = data->token;
+ port->buf[token].used = 1;
+ }
+ break;
+ case ASM_DATA_EVENT_READ_DONE_V2: {
+ struct asm_data_cmd_read_v2_done *done = data->payload;
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ client_event = ASM_CLIENT_EVENT_DATA_READ_DONE;
+
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ phys_addr_t phys = port->buf[data->token].phys;
+ token = data->token;
+ port->buf[token].used = 0;
+=======
+ &port->buf[hdr->token].phys);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+ break;
+ case ASM_DATA_EVENT_READ_DONE_V2:
+ client_event = ASM_CLIENT_EVENT_DATA_READ_DONE;
+ if (ac->io_mode & ASM_SYNC_IO_MODE) {
+ struct asm_data_cmd_read_v2_done *done = data->payload;
+ unsigned long flags;
+ phys_addr_t phys;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ if (!port->buf) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = 0;
+ goto done;
+ }
+
+ phys = port->buf[hdr->token].phys;
+>>>>>>>
+
+ if (upper_32_bits(phys) != done->buf_addr_msw ||
+ lower_32_bits(phys) != done->buf_addr_lsw) {
+ dev_err(ac->dev, "Expected addr %pa %08x-%08x\n",
+<<<<<<<
+ &port->buf[data->token].phys, done->buf_addr_lsw, done->buf_addr_msw);
+ break;
+ }
+ }
+ }
+=======
+ &port->buf[hdr->token].phys,
+ done->buf_addr_lsw,
+ done->buf_addr_msw);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ ret = -EINVAL;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+>>>>>>>
+
+ break;
+ }
+
+ if (ac->cb)
+<<<<<<<
+ ac->cb(client_event, data->token, data->payload, ac->priv);
+
+ return 0;
+}
+
+static int q6asm_srvc_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct q6asm *a, *q6asm = dev_get_drvdata(&adev->dev);
+ struct audio_client *ac = NULL;
+ struct audio_port_data *port;
+ uint32_t dir = 0;
+ uint32_t sid = 0;
+ int session_id;
+
+ session_id = (data->dest_port >> 8) & 0xFF;
+ if (session_id)
+ return q6asm_stream_callback(adev, data, session_id);
+
+ result = data->payload;
+ sid = (data->token >> 8) & 0x0F;
+=======
+ ac->cb(client_event, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+ return ret;
+}
+
+static int q6asm_srvc_callback(struct apr_device *adev,
+ struct apr_resp_pkt *data)
+{
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *result;
+ struct audio_port_data *port;
+ struct audio_client *ac = NULL;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6asm *a;
+ uint32_t sid = 0;
+ uint32_t dir = 0;
+ int session_id;
+
+ session_id = (hdr->dest_port >> 8) & 0xFF;
+ if (session_id)
+ return q6asm_stream_callback(adev, data, session_id);
+
+ sid = (hdr->token >> 8) & 0x0F;
+>>>>>>>
+ ac = q6asm_get_audio_client(q6asm, sid);
+ if (!ac) {
+ dev_err(&adev->dev, "Audio Client not active\n");
+ return 0;
+ }
+
+<<<<<<<
+ a = dev_get_drvdata(ac->dev);
+ dir = (data->token & 0x0F);
+ port = &ac->port[dir];
+
+ switch (data->opcode)
+ case APR_BASIC_RSP_RESULT: {
+ switch (result->opcode) {
+ case ASM_SESSION_CMD_MEMORY_MAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+
+ case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = 0;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ if (result->status != 0) {
+ dev_err(ac->dev,
+ "cmd = 0x%x retur err= 0x%x sid:%d\n",
+ result->opcode, result->status, sid);
+ a->mem_state = -result->status;
+ } else {
+ a->mem_state = 0;
+ }
+
+=======
+ a = dev_get_drvdata(ac->dev->parent);
+ dir = (hdr->token & 0x0F);
+ port = &ac->port[dir];
+ result = data->payload;
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT:
+ switch (result->opcode) {
+ case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result = *result;
+>>>>>>>
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_err(&adev->dev, "command[0x%x] not expecting rsp\n",
+ result->opcode);
+ break;
+ }
+<<<<<<<
+ goto done;
+ case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+ ac->result.status = 0;
+ ac->result.opcode = hdr->opcode;
+ port->mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ ac->result.opcode = hdr->opcode;
+ ac->result.status = 0;
+ port->mem_map_handle = 0;
+=======
+ return 0;
+ case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = result->opcode;
+ wake_up(&a->mem_wait);
+ break;
+ case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS:
+ a->mem_state = 0;
+ ac->port[dir].mem_map_handle = 0;
+>>>>>>>
+ wake_up(&a->mem_wait);
+ break;
+ default:
+ dev_dbg(&adev->dev, "command[0x%x]success [0x%x]\n",
+ result->opcode, result->status);
+ break;
+ }
+
+ if (ac->cb)
+<<<<<<<
+ ac->cb(data->opcode, data->token, data->payload, ac->priv);
+=======
+ ac->cb(hdr->opcode, hdr->token, data->payload, ac->priv);
+
+done:
+ kref_put(&ac->refcount, q6asm_audio_client_release);
+>>>>>>>
+
+ return 0;
+}
+
+/**
+ * q6asm_get_session_id() - get session id for audio client
+ *
+<<<<<<<
+ * @ac: audio client pointer
+=======
+ * @c: audio client pointer
+>>>>>>>
+ *
+ * Return: Will be the session id of the audio client.
+ */
+int q6asm_get_session_id(struct audio_client *c)
+{
+ return c->session;
+}
+EXPORT_SYMBOL_GPL(q6asm_get_session_id);
+
+/**
+ * q6asm_audio_client_alloc() - Allocate a new audio client
+ *
+ * @dev: Pointer to asm child device.
+ * @cb: event callback.
+ * @priv: private data associated with this client.
+<<<<<<<
+=======
+ * @stream_id: stream id
+ * @perf_mode: performance mode for this client
+>>>>>>>
+ *
+ * Return: Will be an error pointer on error or a valid audio client
+ * on success.
+ */
+struct audio_client *q6asm_audio_client_alloc(struct device *dev, q6asm_cb cb,
+<<<<<<<
+ void *priv, int stream_id)
+{
+ struct q6asm *a = dev_get_drvdata(dev);
+ struct audio_client *ac;
+
+ if (stream_id + 1 > MAX_SESSIONS)
+ return ERR_PTR(-EINVAL);
+=======
+ void *priv, int stream_id,
+ int perf_mode)
+{
+ struct q6asm *a = dev_get_drvdata(dev->parent);
+ struct audio_client *ac;
+ unsigned long flags;
+
+ ac = q6asm_get_audio_client(a, stream_id + 1);
+ if (ac) {
+ dev_err(dev, "Audio Client already active\n");
+ return ac;
+ }
+>>>>>>>
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return ERR_PTR(-ENOMEM);
+
+<<<<<<<
+ mutex_lock(&a->session_lock);
+ a->session[stream_id + 1] = ac;
+ mutex_unlock(&a->session_lock);
+
+ ac->session = stream_id + 1;
+ ac->cb = cb;
+ ac->dev = dev;
+ ac->priv = priv;
+ ac->io_mode = ASM_SYNC_IO_MODE;
+ ac->perf_mode = LEGACY_PCM_MODE;
+ /* DSP expects stream id from 1 */
+ ac->stream_id = 1;
+ ac->adev = a->adev;
+
+ init_waitqueue_head(&ac->cmd_wait);
+ mutex_init(&ac->lock);
+ ac->cmd_state = 0;
+=======
+ spin_lock_irqsave(&a->slock, flags);
+ a->session[stream_id + 1] = ac;
+ spin_unlock_irqrestore(&a->slock, flags);
+ ac->session = stream_id + 1;
+ ac->cb = cb;
+ ac->dev = dev;
+ ac->q6asm = a;
+ ac->priv = priv;
+ ac->io_mode = ASM_SYNC_IO_MODE;
+ ac->perf_mode = perf_mode;
+ /* DSP expects stream id from 1 */
+ ac->stream_id = 1;
+ ac->adev = a->adev;
+ kref_init(&ac->refcount);
+
+ init_waitqueue_head(&ac->cmd_wait);
+ mutex_init(&ac->cmd_lock);
+ spin_lock_init(&ac->lock);
+>>>>>>>
+
+ return ac;
+}
+EXPORT_SYMBOL_GPL(q6asm_audio_client_alloc);
+
+<<<<<<<
+static int q6asm_ac_send_cmd_sync(struct audio_client *ac, struct apr_pkt *pkt)
+{
+ struct apr_hdr *hdr = &pkt->hdr;
+ int rc;
+
+ mutex_lock(&ac->cmd_lock);
+ ac->result.opcode = 0;
+ ac->result.status = 0;
+
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (ac->result.opcode == hdr->opcode), 5 * HZ);
+=======
+static int q6asm_ac_send_cmd_sync(struct audio_client *ac, void *cmd)
+{
+ int rc;
+
+ mutex_lock(&ac->lock);
+ ac->cmd_state = 1;
+
+ rc = apr_send_pkt(ac->adev, cmd);
+ if (rc < 0)
+ goto err;
+
+ rc = wait_event_timeout(ac->cmd_wait, (ac->cmd_state <= 0), 5 * HZ);
+>>>>>>>
+ if (!rc) {
+ dev_err(ac->dev, "CMD timeout\n");
+ rc = -ETIMEDOUT;
+ goto err;
+ }
+
+<<<<<<<
+ if (ac->cmd_state > 0)
+ rc = q6dsp_errno(ac->cmd_state);
+
+err:
+ mutex_unlock(&ac->lock);
+ return rc;
+}
+
+int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v3 open;
+ int rc;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+ open.mode_flags = 0x00;
+ open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+
+ /* source endpoint : matrix */
+ open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+ open.bits_per_sample = bits_per_sample;
+ open.postprocopo_id = ASM_DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ return -EINVAL;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &open);
+ if (rc < 0)
+ return rc;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+ return 0;
+}
+
+int q6asm_open_write_v1(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v1 open;
+ int rc;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE;
+ open.uMode = STREAM_PRIORITY_HIGH;
+
+ /* source endpoint : matrix */
+ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+ open.stream_handle = 0x00;
+ open.post_proc_top = ASM_DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.format = LINEAR_PCM;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ return -EINVAL;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &open);
+ if (rc < 0)
+ return rc;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+ return 0;
+}
+/**
+ * q6asm_open_write() - Open audio client for writing
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->open_write(ac, format, bits_per_sample);
+
+}
+EXPORT_SYMBOL_GPL(q6asm_open_write);
+
+static int __q6asm_run_v2(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_session_cmd_run_v2 run;
+
+ q6asm_add_hdr(ac, &run.hdr, sizeof(run), true, ac->stream_id);
+
+ run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+ run.flags = flags;
+ run.time_lsw = lsw_ts;
+ run.time_msw = msw_ts;
+ if (wait)
+ return q6asm_ac_send_cmd_sync(ac, &run);
+ else
+ return apr_send_pkt(ac->adev, &run);
+
+}
+
+static int __q6asm_run_v1(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_stream_cmd_run_v1 run;
+
+ q6asm_add_hdr(ac, &run.hdr, sizeof(run), true, ac->stream_id);
+
+ run.hdr.opcode = ASM_SESSION_CMD_RUN;
+ run.flags = flags;
+ run.lsw_ts = lsw_ts;
+ run.msw_ts = msw_ts;
+ if (wait)
+ return q6asm_ac_send_cmd_sync(ac, &run);
+ else
+ return apr_send_pkt(ac->adev, &run);
+
+=======
+ if (ac->result.status > 0) {
+ dev_err(ac->dev, "DSP returned error[%x]\n",
+ ac->result.status);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ }
+
+
+err:
+ mutex_unlock(&ac->cmd_lock);
+ return rc;
+}
+
+/**
+ * q6asm_open_write() - Open audio client for writing
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_cmd_open_write_v3 *open;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+ open->mode_flags = 0x00;
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION;
+
+ /* source endpoint : matrix */
+ open->sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+ open->bits_per_sample = bits_per_sample;
+ open->postprocopo_id = ASM_NULL_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid format 0x%x\n", format);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ if (rc < 0)
+ goto err;
+
+ ac->io_mode |= ASM_TUN_WRITE_IO_MODE;
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_open_write);
+
+static int __q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts, bool wait)
+{
+ struct asm_session_cmd_run_v2 *run;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*run);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ run = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+ run->flags = flags;
+ run->time_lsw = lsw_ts;
+ run->time_msw = msw_ts;
+ if (wait) {
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ } else {
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ }
+
+ kfree(pkt);
+ return rc;
+>>>>>>>
+}
+
+/**
+ * q6asm_run() - start the audio client
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+<<<<<<<
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, true);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->run(ac, flags, msw_ts, lsw_ts, true);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_run);
+
+/**
+ * q6asm_run_nowait() - start the audio client without blocking
+ *
+ * @ac: audio client pointer
+ * @flags: flags associated with write
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+<<<<<<<
+ return __q6asm_run(ac, flags, msw_ts, lsw_ts, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_run_nowait);
+
+/**
+ * q6asm_media_format_block_multi_ch_pcm() - setup pcm configuration
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @channel_map: channel map pointer
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->num_channels = channels;
+ fmt->bits_per_sample = bits_per_sample;
+ fmt->sample_rate = rate;
+ fmt->is_signed = 1;
+
+ channel_mapping = fmt->channel_mapping;
+
+ if (channel_map) {
+ memcpy(channel_mapping, channel_map, PCM_MAX_NUM_CHANNEL);
+ } else {
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ dev_err(ac->dev, " map channels failed %d\n", channels);
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+err:
+ kfree(pkt);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+/**
+ * q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->run(ac, flags, msw_ts, lsw_ts, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_run_nowait);
+
+int q6asm_media_format_block_multi_ch_pcm_v2(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
+ u8 *channel_mapping;
+ int rc;
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), true, ac->stream_id);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmt_blk);
+ fmt.num_channels = channels;
+ fmt.bits_per_sample = bits_per_sample;
+ fmt.sample_rate = rate;
+ fmt.is_signed = 1;
+
+ channel_mapping = fmt.channel_mapping;
+
+ if (channel_map) {
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ } else {
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ dev_err(ac->dev, " map channels failed %d\n", channels);
+ return -EINVAL;
+ }
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, &fmt);
+ if (rc < 0)
+ goto fail_cmd;
+
+ return 0;
+fail_cmd:
+ return rc;
+}
+
+int q6asm_media_format_block_multi_ch_pcm_v1(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct asm_stream_media_format_update_v1 fmt;
+ int rc;
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), true, ac->stream_id);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = LINEAR_PCM;
+ fmt.cfg_size = sizeof(struct asm_pcm_cfg);
+ fmt.write_cfg.pcm_cfg.ch_cfg = channels;
+ fmt.write_cfg.pcm_cfg.bits_per_sample = bits_per_sample; //FIXME 16
+ fmt.write_cfg.pcm_cfg.sample_rate = rate;
+ fmt.write_cfg.pcm_cfg.is_signed = 1;
+ fmt.write_cfg.pcm_cfg.interleaved = 1;
+
+ rc = q6asm_ac_send_cmd_sync(ac, &fmt);
+ if (rc < 0)
+ goto fail_cmd;
+
+ return 0;
+fail_cmd:
+ return rc;
+}
+/**
+ * q6asm_media_format_block_multi_ch_pcm() - setup pcm configuration
+>>>>>>>
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+<<<<<<<
+=======
+ * @use_default_chmap: flag to use default ch map.
+ * @channel_map: channel map pointer
+>>>>>>>
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+<<<<<<<
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 *enc_cfg;
+ struct apr_pkt *pkt;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*enc_cfg);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ enc_cfg = p + APR_HDR_SIZE;
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg->encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg->encdec.param_size = sizeof(*enc_cfg) - sizeof(enc_cfg->encdec);
+ enc_cfg->encblk.frames_per_buf = frames_per_buf;
+ enc_cfg->encblk.enc_cfg_blk_size = enc_cfg->encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg->num_channels = channels;
+ enc_cfg->bits_per_sample = bits_per_sample;
+ enc_cfg->sample_rate = rate;
+ enc_cfg->is_signed = 1;
+ channel_mapping = enc_cfg->channel_mapping;
+
+ if (q6dsp_map_channels(channel_mapping, channels)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+err:
+ kfree(pkt);
+ return rc;
+=======
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->format_block_multi_ch_pcm(ac, rate, channels,
+ channel_map,
+ bits_per_sample);
+}
+EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+static int __q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), true, ac->stream_id);
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+ sizeof(enc_cfg.encdec);
+ enc_cfg.encblk.frames_per_buf = frames_per_buf;
+ enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg.num_channels = channels;
+ enc_cfg.bits_per_sample = bits_per_sample;
+ enc_cfg.sample_rate = rate;
+ enc_cfg.is_signed = 1;
+ channel_mapping = enc_cfg.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (q6dsp_map_channels(channel_mapping, channels))
+ return -EINVAL;
+
+
+ return q6asm_ac_send_cmd_sync(ac, &enc_cfg);
+
+}
+/**
+ * q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
+ *
+ * @ac: audio client pointer
+ * @rate: audio sample rate
+ * @channels: number of audio channels.
+ * @use_default_chmap: flag to use default ch map.
+ * @channel_map: channel map pointer
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+ return __q6asm_enc_cfg_blk_pcm(ac, rate, channels, bits_per_sample);
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support);
+
+/**
+ * q6asm_read() - read data of period size from audio client
+ *
+ * @ac: audio client pointer
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_read(struct audio_client *ac)
+{
+<<<<<<<
+ struct asm_data_cmd_read_v2 *read;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ struct apr_pkt *pkt;
+ unsigned long flags;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*read);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ read = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.opcode = ASM_DATA_CMD_READ_V2;
+ read->buf_addr_lsw = lower_32_bits(ab->phys);
+ read->buf_addr_msw = upper_32_bits(ab->phys);
+ read->mem_map_handle = port->mem_map_handle;
+
+ read->buf_size = ab->size;
+ read->seq_id = port->dsp_buf;
+ pkt->hdr.token = port->dsp_buf;
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+ else
+ pr_err("read op[0x%x]rc[%d]\n", pkt->hdr.opcode, rc);
+
+ kfree(pkt);
+ return rc;
+=======
+ struct asm_data_cmd_read_v2 read;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ q6asm_add_hdr(ac, &read.hdr, sizeof(read), false, ac->stream_id);
+ ab = &port->buf[port->dsp_buf];
+
+ read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+ read.buf_addr_lsw = lower_32_bits(ab->phys);
+ read.buf_addr_msw = upper_32_bits(ab->phys);
+ read.mem_map_handle = ac->port[SNDRV_PCM_STREAM_CAPTURE].mem_map_handle;
+
+ read.buf_size = ab->size;
+ read.seq_id = port->dsp_buf;
+ read.hdr.token = port->dsp_buf;
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ rc = apr_send_pkt(ac->adev, &read);
+ if (rc < 0) {
+ pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+ return rc;
+ }
+
+ return 0;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_read);
+
+static int __q6asm_open_read(struct audio_client *ac,
+ uint32_t format, uint16_t bits_per_sample)
+{
+<<<<<<<
+ struct asm_stream_cmd_open_read_v3 *open;
+ struct apr_pkt *pkt;
+ int pkt_size, rc;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+ pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open->src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+ open->preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_NONE;
+ open->bits_per_sample = bits_per_sample;
+ open->mode_flags = 0x0;
+
+ open->mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+=======
+ struct asm_stream_cmd_open_read_v3 open;
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), true, ac->stream_id);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+ open.preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_NONE;
+ open.bits_per_sample = bits_per_sample;
+ open.mode_flags = 0x0;
+
+ open.mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+>>>>>>>
+ ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+<<<<<<<
+ open->mode_flags |= 0x00;
+ open->enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+=======
+ open.mode_flags |= 0x00;
+ open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+>>>>>>>
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ }
+
+<<<<<<<
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+
+ kfree(pkt);
+ return rc;
+=======
+ return q6asm_ac_send_cmd_sync(ac, &open);
+>>>>>>>
+}
+
+/**
+ * q6asm_open_read() - Open audio client for reading
+ *
+ * @ac: audio client pointer
+ * @format: audio sample format
+ * @bits_per_sample: bits per sample
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ return __q6asm_open_read(ac, format, bits_per_sample);
+}
+EXPORT_SYMBOL_GPL(q6asm_open_read);
+
+<<<<<<<
+/**
+ * q6asm_write_async() - non blocking write
+ *
+ * @ac: audio client pointer
+ * @len: length in bytes
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ * @wflags: flags associated with write
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t wflags)
+{
+ struct asm_data_cmd_write_v2 *write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ unsigned long flags;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ int rc = 0;
+ void *p;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*write);
+ p = kzalloc(pkt_size, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ write = p + APR_HDR_SIZE;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+ pkt->hdr.token = port->dsp_buf;
+ pkt->hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+ write->buf_addr_lsw = lower_32_bits(ab->phys);
+ write->buf_addr_msw = upper_32_bits(ab->phys);
+ write->buf_size = len;
+ write->seq_id = port->dsp_buf;
+ write->timestamp_lsw = lsw_ts;
+ write->timestamp_msw = msw_ts;
+ write->mem_map_handle =
+ ac->port[SNDRV_PCM_STREAM_PLAYBACK].mem_map_handle;
+
+ if (wflags == NO_TIMESTAMP)
+ write->flags = (wflags & 0x800000FF);
+ else
+ write->flags = (0x80000000 | wflags);
+=======
+int q6asm_write_async_v2(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct asm_data_cmd_write_v2 write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc = 0;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &write.hdr, sizeof(write), false,
+ ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+
+ write.hdr.token = port->dsp_buf;
+ write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+ write.buf_addr_lsw = lower_32_bits(ab->phys);
+ write.buf_addr_msw = upper_32_bits(ab->phys);
+ write.buf_size = len;
+ write.seq_id = port->dsp_buf;
+ write.timestamp_lsw = lsw_ts;
+ write.timestamp_msw = msw_ts;
+ write.mem_map_handle =
+ ac->port[SNDRV_PCM_STREAM_PLAYBACK].mem_map_handle;
+
+ if (flags == NO_TIMESTAMP)
+ write.flags = (flags & 0x800000FF);
+ else
+ write.flags = (0x80000000 | flags);
+>>>>>>>
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+<<<<<<<
+ rc = apr_send_pkt(ac->adev, &write);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int q6asm_write_nolock_v1(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct asm_stream_cmd_write_v1 write;
+ struct audio_port_data *port;
+ struct audio_buffer *ab;
+ int rc = 0;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return 0;
+
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ q6asm_add_hdr(ac, &write.hdr, sizeof(write), false,
+ ac->stream_id);
+
+ ab = &port->buf[port->dsp_buf];
+
+ write.hdr.token = port->dsp_buf;
+ write.hdr.opcode = ASM_DATA_CMD_WRITE;
+ write.buf_add = lower_32_bits(ab->phys);
+ write.avail_bytes = len;
+ write.uid = port->dsp_buf;
+ write.lsw_ts = lsw_ts;
+ write.msw_ts = msw_ts;
+
+ if (flags == NO_TIMESTAMP)
+ write.uflags = (flags & 0x800000FF);
+ else
+ write.uflags = (0x80000000 | flags);
+
+ port->dsp_buf++;
+
+ if (port->dsp_buf >= port->num_periods)
+ port->dsp_buf = 0;
+
+ rc = apr_send_pkt(ac->adev, &write);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+/**
+ * q6asm_write_async() - non blocking write
+ *
+ * @ac: audio client pointer
+ * @len: length in bytes
+ * @msw_ts: timestamp msw
+ * @lsw_ts: timestamp lsw
+ * @flags: flags associated with write
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags)
+{
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->write_nolock(ac, len, msw_ts, lsw_ts, flags);
+
+=======
+ spin_unlock_irqrestore(&ac->lock, flags);
+ rc = apr_send_pkt(ac->adev, pkt);
+ if (rc == pkt_size)
+ rc = 0;
+
+ kfree(pkt);
+ return rc;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_write_async);
+
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+<<<<<<<
+ int cnt = 0;
+ int loopcnt = 0;
+ int used;
+ struct audio_port_data *port = NULL;
+
+ if (!(ac->io_mode & ASM_SYNC_IO_MODE))
+ return;
+
+ used = (ac->io_mode & ASM_TUN_WRITE_IO_MODE ? 1 : 0);
+ mutex_lock(&ac->lock);
+ for (loopcnt = 0; loopcnt <= SNDRV_PCM_STREAM_CAPTURE;
+ loopcnt++) {
+ port = &ac->port[loopcnt];
+ cnt = port->num_periods - 1;
+ port->dsp_buf = 0;
+ while (cnt >= 0) {
+ if (!port->buf)
+ continue;
+ port->buf[cnt].used = used;
+ cnt--;
+ }
+ }
+ mutex_unlock(&ac->lock);
+=======
+ struct audio_port_data *port = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ac->lock, flags);
+ port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK];
+ port->dsp_buf = 0;
+ port = &ac->port[SNDRV_PCM_STREAM_CAPTURE];
+ port->dsp_buf = 0;
+ spin_unlock_irqrestore(&ac->lock, flags);
+>>>>>>>
+}
+
+static int __q6asm_cmd(struct audio_client *ac, int cmd, bool wait)
+{
+ int stream_id = ac->stream_id;
+<<<<<<<
+ struct apr_hdr hdr;
+ int rc;
+
+ q6asm_add_hdr(ac, &hdr, sizeof(hdr), true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ hdr.opcode = ASM_STREAM_CMD_CLOSE;
+=======
+ struct apr_pkt pkt;
+ int rc;
+
+ q6asm_add_hdr(ac, &pkt.hdr, APR_HDR_SIZE, true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ pkt.hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ pkt.hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ pkt.hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ pkt.hdr.opcode = ASM_STREAM_CMD_CLOSE;
+>>>>>>>
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (wait)
+<<<<<<<
+ rc = q6asm_ac_send_cmd_sync(ac, &hdr);
+ else
+ return apr_send_pkt(ac->adev, &hdr);
+=======
+ rc = q6asm_ac_send_cmd_sync(ac, &pkt);
+ else
+ return apr_send_pkt(ac->adev, &pkt);
+>>>>>>>
+
+ if (rc < 0)
+ return rc;
+
+ if (cmd == CMD_FLUSH)
+ q6asm_reset_buf_state(ac);
+
+ return 0;
+}
+
+<<<<<<<
+=======
+static int __q6asm_cmd_v1(struct audio_client *ac, int cmd, bool wait)
+{
+ int stream_id = ac->stream_id;
+ struct apr_hdr hdr;
+ int rc;
+
+ q6asm_add_hdr(ac, &hdr, sizeof(hdr), true, stream_id);
+
+ switch (cmd) {
+ case CMD_PAUSE:
+ hdr.opcode = ASM_SESSION_CMD_PAUSE;
+ break;
+ case CMD_SUSPEND:
+ hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+ break;
+ case CMD_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH;
+ break;
+ case CMD_OUT_FLUSH:
+ hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+ break;
+ case CMD_EOS:
+ hdr.opcode = ASM_DATA_CMD_EOS;
+ break;
+ case CMD_CLOSE:
+ hdr.opcode = ASM_STREAM_CMD_CLOSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (wait)
+ rc = q6asm_ac_send_cmd_sync(ac, &hdr);
+ else
+ return apr_send_pkt(ac->adev, &hdr);
+
+ if (rc < 0)
+ return rc;
+
+ if (cmd == CMD_FLUSH)
+ q6asm_reset_buf_state(ac);
+
+ return 0;
+}
+>>>>>>>
+/**
+ * q6asm_cmd() - run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+<<<<<<<
+ return __q6asm_cmd(ac, cmd, true);
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->cmd(ac, cmd, true);
+
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd);
+
+/**
+ * q6asm_cmd_nowait() - non blocking, run cmd on audio client
+ *
+ * @ac: audio client pointer
+ * @cmd: command to run on audio client.
+ *
+ * Return: Will be a negative value on error or zero on success
+ */
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+<<<<<<<
+ return __q6asm_cmd(ac, cmd, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
+
+static int q6asm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+ struct q6asm *q6asm;
+
+ q6asm = devm_kzalloc(dev, sizeof(*q6asm), GFP_KERNEL);
+ if (!q6asm)
+ return -ENOMEM;
+
+ q6core_get_svc_api_info(adev->svc_id, &q6asm->ainfo);
+
+ q6asm->dev = dev;
+ q6asm->adev = adev;
+ init_waitqueue_head(&q6asm->mem_wait);
+ spin_lock_init(&q6asm->slock);
+ dev_set_drvdata(dev, q6asm);
+
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ q6asm->pdev_dais = of_platform_device_create(dais_np,
+ "q6asm-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+=======
+ struct q6asm *a = dev_get_drvdata(ac->dev);
+
+ return a->ops->cmd(ac, cmd, false);
+}
+EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
+
+struct q6asm_ops q6asm_v1_ops = {
+ .memory_unmap = __q6asm_memory_unmap_v1,
+ .memory_map_regions = __q6asm_memory_map_regions_v1,
+ .open_write = q6asm_open_write_v1,
+ .run = __q6asm_run_v1,
+ .format_block_multi_ch_pcm = q6asm_media_format_block_multi_ch_pcm_v1,
+ .write_nolock = q6asm_write_nolock_v1,
+ .cmd = __q6asm_cmd_v1,
+};
+
+struct q6asm_ops q6asm_v2_ops = {
+ .memory_unmap = __q6asm_memory_unmap_v2,
+ .memory_map_regions = __q6asm_memory_map_regions_v2,
+ .open_write = q6asm_open_write_v3,
+ .run = __q6asm_run_v2,
+ .format_block_multi_ch_pcm = q6asm_media_format_block_multi_ch_pcm_v2,
+ .write_nolock = q6asm_write_async_v2,
+ .cmd = __q6asm_cmd,
+};
+
+static int q6asm_probe(struct apr_device *adev)
+{
+ struct q6asm *q6asm;
+
+ q6asm = devm_kzalloc(&adev->dev, sizeof(*q6asm), GFP_KERNEL);
+ if (!q6asm)
+ return -ENOMEM;
+
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x7)
+ q6asm->ops = &q6asm_v2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ q6asm->ops = &q6asm_v1_ops;
+
+ if (!q6asm->ops) {
+ dev_err(&adev->dev, "Unsupported ASM version\n");
+ return -EINVAL;
+ }
+
+ q6asm->dev = &adev->dev;
+ q6asm->adev = adev;
+ q6asm->mem_state = 0;
+ init_waitqueue_head(&q6asm->mem_wait);
+ mutex_init(&q6asm->session_lock);
+ dev_set_drvdata(&adev->dev, q6asm);
+
+ return q6asm_dai_probe(&adev->dev);
+>>>>>>>
+}
+
+static int q6asm_remove(struct apr_device *adev)
+{
+<<<<<<<
+ return q6asm_dai_remove(&adev->dev);
+}
+
+=======
+ struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
+
+ if (q6asm->pdev_dais)
+ of_platform_device_destroy(&q6asm->pdev_dais->dev, NULL);
+
+ return 0;
+}
+>>>>>>>
+static const struct of_device_id q6asm_device_id[] = {
+ { .compatible = "qcom,q6asm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6asm_device_id);
+
+static struct apr_driver qcom_q6asm_driver = {
+ .probe = q6asm_probe,
+ .remove = q6asm_remove,
+ .callback = q6asm_srvc_callback,
+ .driver = {
+ .name = "qcom-q6asm",
+ .of_match_table = of_match_ptr(q6asm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6asm_driver);
+MODULE_DESCRIPTION("Q6 Audio Stream Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/postimage b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/postimage
new file mode 100644
index 0000000..c7ed542
--- /dev/null
+++ b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/postimage
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __Q6AFE_H__
+#define __Q6AFE_H__
+
+#include <dt-bindings/sound/qcom,q6afe.h>
+
+#define AFE_PORT_MAX 105
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+#define AFE_MAX_PORTS AFE_PORT_MAX
+
+#define Q6AFE_MAX_MI2S_LINES 4
+
+#define AFE_MAX_CHAN_COUNT 8
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT 0x8
+
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+
+#define LPAIF_DIG_CLK 1
+#define LPAIF_BIT_CLK 2
+#define LPAIF_OSR_CLK 3
+
+/* Clock ID for Primary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT 0x100
+/* Clock ID for Primary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT 0x101
+/* Clock ID for Secondary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT 0x102
+/* Clock ID for Secondary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT 0x103
+/* Clock ID for Tertiary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT 0x104
+/* Clock ID for Tertiary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT 0x105
+/* Clock ID for Quaternary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT 0x106
+/* Clock ID for Quaternary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT 0x107
+/* Clock ID for Speaker I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_IBIT 0x108
+/* Clock ID for Speaker I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_EBIT 0x109
+/* Clock ID for Speaker I2S OSR */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR 0x10A
+
+/* Clock ID for QUINARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_IBIT 0x10B
+/* Clock ID for QUINARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_EBIT 0x10C
+/* Clock ID for SENARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_IBIT 0x10D
+/* Clock ID for SENARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_EBIT 0x10E
+/* Clock ID for INT0 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT0_MI2S_IBIT 0x10F
+/* Clock ID for INT1 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT1_MI2S_IBIT 0x110
+/* Clock ID for INT2 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT2_MI2S_IBIT 0x111
+/* Clock ID for INT3 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT3_MI2S_IBIT 0x112
+/* Clock ID for INT4 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT4_MI2S_IBIT 0x113
+/* Clock ID for INT5 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT5_MI2S_IBIT 0x114
+/* Clock ID for INT6 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT6_MI2S_IBIT 0x115
+
+/* Clock ID for QUINARY MI2S OSR CLK */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR 0x116
+
+/* Clock ID for Primary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT 0x200
+/* Clock ID for Primary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT 0x201
+/* Clock ID for Secondary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT 0x202
+/* Clock ID for Secondary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT 0x203
+/* Clock ID for Tertiary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT 0x204
+/* Clock ID for Tertiary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT 0x205
+/* Clock ID for Quaternary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT 0x206
+/* Clock ID for Quaternary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT 0x207
+/* Clock ID for Quinary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_IBIT 0x208
+/* Clock ID for Quinary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_EBIT 0x209
+/* Clock ID for QUINARY PCM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUI_PCM_OSR 0x20A
+
+/** Clock ID for Primary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT 0x200
+/** Clock ID for Primary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT 0x201
+/** Clock ID for Secondary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT 0x202
+/** Clock ID for Secondary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT 0x203
+/** Clock ID for Tertiary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT 0x204
+/** Clock ID for Tertiary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT 0x205
+/** Clock ID for Quaternary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT 0x206
+/** Clock ID for Quaternary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT 0x207
+/** Clock ID for Quinary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_IBIT 0x208
+/** Clock ID for Quinary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT 0x209
+/** Clock ID for Quinary TDM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_OSR 0x20A
+
+/* Clock ID for MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_1 0x300
+/* Clock ID for MCLK2 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_2 0x301
+/* Clock ID for MCLK3 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_3 0x302
+/* Clock ID for MCLK4 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_4 0x304
+/* Clock ID for Internal Digital Codec Core */
+#define Q6AFE_LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 0x303
+/* Clock ID for INT MCLK0 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_0 0x305
+/* Clock ID for INT MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_1 0x306
+
+/* Clock attribute for invalid use (reserved for internal usage) */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVALID 0x0
+/* Clock attribute for no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+/* Clock attribute for dividend couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+/* Clock attribute for divisor couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+/* Clock attribute for invert and no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO 0x4
+
+#define Q6AFE_CMAP_INVALID 0xFFFF
+
+struct q6afe_hdmi_cfg {
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+};
+
+struct q6afe_slim_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+ u8 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+struct q6afe_i2s_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+ u32 sd_line_mask;
+ int fmt;
+};
+
+struct q6afe_tdm_cfg {
+ u16 num_channels;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 slot_width;
+ u16 slot_mask;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+struct q6afe_port_config {
+ struct q6afe_hdmi_cfg hdmi;
+ struct q6afe_slim_cfg slim;
+ struct q6afe_i2s_cfg i2s_cfg;
+ struct q6afe_tdm_cfg tdm;
+};
+
+struct q6afe_port;
+
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id);
+int q6afe_port_start(struct q6afe_port *port);
+int q6afe_port_stop(struct q6afe_port *port);
+void q6afe_port_put(struct q6afe_port *port);
+int q6afe_get_port_id(int index);
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg);
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
+void q6afe_tdm_port_prepare(struct q6afe_port *port, struct q6afe_tdm_cfg *cfg);
+
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir);
+#endif /* __Q6AFE_H__ */
diff --git a/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/preimage b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/preimage
new file mode 100644
index 0000000..e3b4089
--- /dev/null
+++ b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/preimage
@@ -0,0 +1,246 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6AFE_H__
+#define __Q6AFE_H__
+
+#include <dt-bindings/sound/qcom,q6afe.h>
+
+<<<<<<<
+#define AFE_PORT_MAX 105
+=======
+#define AFE_PORT_MAX 48
+>>>>>>>
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+#define AFE_MAX_PORTS AFE_PORT_MAX
+
+<<<<<<<
+=======
+#define Q6AFE_MAX_MI2S_LINES 4
+
+>>>>>>>
+#define AFE_MAX_CHAN_COUNT 8
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT 0x8
+
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+
+#define LPAIF_DIG_CLK 1
+#define LPAIF_BIT_CLK 2
+#define LPAIF_OSR_CLK 3
+
+<<<<<<<
+=======
+/* Clock ID for Primary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT 0x100
+/* Clock ID for Primary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT 0x101
+/* Clock ID for Secondary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT 0x102
+/* Clock ID for Secondary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT 0x103
+/* Clock ID for Tertiary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT 0x104
+/* Clock ID for Tertiary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT 0x105
+/* Clock ID for Quaternary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT 0x106
+/* Clock ID for Quaternary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT 0x107
+/* Clock ID for Speaker I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_IBIT 0x108
+/* Clock ID for Speaker I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_EBIT 0x109
+/* Clock ID for Speaker I2S OSR */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR 0x10A
+
+/* Clock ID for QUINARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_IBIT 0x10B
+/* Clock ID for QUINARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_EBIT 0x10C
+/* Clock ID for SENARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_IBIT 0x10D
+/* Clock ID for SENARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_EBIT 0x10E
+/* Clock ID for INT0 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT0_MI2S_IBIT 0x10F
+/* Clock ID for INT1 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT1_MI2S_IBIT 0x110
+/* Clock ID for INT2 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT2_MI2S_IBIT 0x111
+/* Clock ID for INT3 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT3_MI2S_IBIT 0x112
+/* Clock ID for INT4 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT4_MI2S_IBIT 0x113
+/* Clock ID for INT5 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT5_MI2S_IBIT 0x114
+/* Clock ID for INT6 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT6_MI2S_IBIT 0x115
+
+/* Clock ID for QUINARY MI2S OSR CLK */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR 0x116
+
+/* Clock ID for Primary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT 0x200
+/* Clock ID for Primary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT 0x201
+/* Clock ID for Secondary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT 0x202
+/* Clock ID for Secondary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT 0x203
+/* Clock ID for Tertiary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT 0x204
+/* Clock ID for Tertiary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT 0x205
+/* Clock ID for Quaternary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT 0x206
+/* Clock ID for Quaternary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT 0x207
+/* Clock ID for Quinary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_IBIT 0x208
+/* Clock ID for Quinary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_EBIT 0x209
+/* Clock ID for QUINARY PCM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUI_PCM_OSR 0x20A
+
+/** Clock ID for Primary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT 0x200
+/** Clock ID for Primary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT 0x201
+/** Clock ID for Secondary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT 0x202
+/** Clock ID for Secondary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT 0x203
+/** Clock ID for Tertiary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT 0x204
+/** Clock ID for Tertiary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT 0x205
+/** Clock ID for Quaternary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT 0x206
+/** Clock ID for Quaternary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT 0x207
+/** Clock ID for Quinary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_IBIT 0x208
+/** Clock ID for Quinary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT 0x209
+/** Clock ID for Quinary TDM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_OSR 0x20A
+
+/* Clock ID for MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_1 0x300
+/* Clock ID for MCLK2 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_2 0x301
+/* Clock ID for MCLK3 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_3 0x302
+/* Clock ID for MCLK4 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_4 0x304
+/* Clock ID for Internal Digital Codec Core */
+#define Q6AFE_LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 0x303
+/* Clock ID for INT MCLK0 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_0 0x305
+/* Clock ID for INT MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_1 0x306
+
+/* Clock attribute for invalid use (reserved for internal usage) */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVALID 0x0
+/* Clock attribute for no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+/* Clock attribute for dividend couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+/* Clock attribute for divisor couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+/* Clock attribute for invert and no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO 0x4
+
+#define Q6AFE_CMAP_INVALID 0xFFFF
+
+>>>>>>>
+struct q6afe_hdmi_cfg {
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+};
+
+struct q6afe_slim_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+ u8 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+struct q6afe_i2s_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+<<<<<<<
+ int fmt;
+};
+
+=======
+ u32 sd_line_mask;
+ int fmt;
+};
+
+struct q6afe_tdm_cfg {
+ u16 num_channels;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 slot_width;
+ u16 slot_mask;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+>>>>>>>
+struct q6afe_port_config {
+ struct q6afe_hdmi_cfg hdmi;
+ struct q6afe_slim_cfg slim;
+ struct q6afe_i2s_cfg i2s_cfg;
+<<<<<<<
+ struct q6afe_tdm_cfg tdm;
+};
+
+struct q6afe_port;
+=======
+};
+
+struct q6afe_port;
+void q6afe_set_dai_data(struct device *dev, void *data);
+void *q6afe_get_dai_data(struct device *dev);
+
+int q6afe_dai_dev_probe(struct device *dev);
+int q6afe_dai_dev_remove(struct device *dev);
+>>>>>>>
+
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id);
+int q6afe_port_start(struct q6afe_port *port);
+int q6afe_port_stop(struct q6afe_port *port);
+void q6afe_port_put(struct q6afe_port *port);
+int q6afe_get_port_id(int index);
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg);
+<<<<<<<
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
+void q6afe_tdm_port_prepare(struct q6afe_port *port, struct q6afe_tdm_cfg *cfg);
+=======
+void q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
+>>>>>>>
+
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir);
+#endif /* __Q6AFE_H__ */
diff --git a/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/thisimage b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/thisimage
new file mode 100644
index 0000000..e3b4089
--- /dev/null
+++ b/rr-cache/cb6d8b1d80552ec7a2b3c39738e60e9fd1b2a501/thisimage
@@ -0,0 +1,246 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+
+#ifndef __Q6AFE_H__
+#define __Q6AFE_H__
+
+#include <dt-bindings/sound/qcom,q6afe.h>
+
+<<<<<<<
+#define AFE_PORT_MAX 105
+=======
+#define AFE_PORT_MAX 48
+>>>>>>>
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+#define AFE_MAX_PORTS AFE_PORT_MAX
+
+<<<<<<<
+=======
+#define Q6AFE_MAX_MI2S_LINES 4
+
+>>>>>>>
+#define AFE_MAX_CHAN_COUNT 8
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT 0x8
+
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+
+#define LPAIF_DIG_CLK 1
+#define LPAIF_BIT_CLK 2
+#define LPAIF_OSR_CLK 3
+
+<<<<<<<
+=======
+/* Clock ID for Primary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT 0x100
+/* Clock ID for Primary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT 0x101
+/* Clock ID for Secondary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT 0x102
+/* Clock ID for Secondary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT 0x103
+/* Clock ID for Tertiary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT 0x104
+/* Clock ID for Tertiary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT 0x105
+/* Clock ID for Quaternary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT 0x106
+/* Clock ID for Quaternary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT 0x107
+/* Clock ID for Speaker I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_IBIT 0x108
+/* Clock ID for Speaker I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_EBIT 0x109
+/* Clock ID for Speaker I2S OSR */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR 0x10A
+
+/* Clock ID for QUINARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_IBIT 0x10B
+/* Clock ID for QUINARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_EBIT 0x10C
+/* Clock ID for SENARY I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_IBIT 0x10D
+/* Clock ID for SENARY I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_EBIT 0x10E
+/* Clock ID for INT0 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT0_MI2S_IBIT 0x10F
+/* Clock ID for INT1 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT1_MI2S_IBIT 0x110
+/* Clock ID for INT2 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT2_MI2S_IBIT 0x111
+/* Clock ID for INT3 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT3_MI2S_IBIT 0x112
+/* Clock ID for INT4 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT4_MI2S_IBIT 0x113
+/* Clock ID for INT5 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT5_MI2S_IBIT 0x114
+/* Clock ID for INT6 I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_INT6_MI2S_IBIT 0x115
+
+/* Clock ID for QUINARY MI2S OSR CLK */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR 0x116
+
+/* Clock ID for Primary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT 0x200
+/* Clock ID for Primary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT 0x201
+/* Clock ID for Secondary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT 0x202
+/* Clock ID for Secondary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT 0x203
+/* Clock ID for Tertiary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT 0x204
+/* Clock ID for Tertiary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT 0x205
+/* Clock ID for Quaternary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT 0x206
+/* Clock ID for Quaternary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT 0x207
+/* Clock ID for Quinary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_IBIT 0x208
+/* Clock ID for Quinary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_EBIT 0x209
+/* Clock ID for QUINARY PCM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUI_PCM_OSR 0x20A
+
+/** Clock ID for Primary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT 0x200
+/** Clock ID for Primary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT 0x201
+/** Clock ID for Secondary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT 0x202
+/** Clock ID for Secondary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT 0x203
+/** Clock ID for Tertiary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT 0x204
+/** Clock ID for Tertiary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT 0x205
+/** Clock ID for Quaternary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT 0x206
+/** Clock ID for Quaternary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT 0x207
+/** Clock ID for Quinary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_IBIT 0x208
+/** Clock ID for Quinary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT 0x209
+/** Clock ID for Quinary TDM OSR */
+#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_OSR 0x20A
+
+/* Clock ID for MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_1 0x300
+/* Clock ID for MCLK2 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_2 0x301
+/* Clock ID for MCLK3 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_3 0x302
+/* Clock ID for MCLK4 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_4 0x304
+/* Clock ID for Internal Digital Codec Core */
+#define Q6AFE_LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 0x303
+/* Clock ID for INT MCLK0 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_0 0x305
+/* Clock ID for INT MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_1 0x306
+
+/* Clock attribute for invalid use (reserved for internal usage) */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVALID 0x0
+/* Clock attribute for no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+/* Clock attribute for dividend couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+/* Clock attribute for divisor couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+/* Clock attribute for invert and no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO 0x4
+
+#define Q6AFE_CMAP_INVALID 0xFFFF
+
+>>>>>>>
+struct q6afe_hdmi_cfg {
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+};
+
+struct q6afe_slim_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+ u8 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+struct q6afe_i2s_cfg {
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 num_channels;
+<<<<<<<
+ int fmt;
+};
+
+=======
+ u32 sd_line_mask;
+ int fmt;
+};
+
+struct q6afe_tdm_cfg {
+ u16 num_channels;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 slot_width;
+ u16 slot_mask;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_MAX_CHAN_COUNT];
+};
+
+>>>>>>>
+struct q6afe_port_config {
+ struct q6afe_hdmi_cfg hdmi;
+ struct q6afe_slim_cfg slim;
+ struct q6afe_i2s_cfg i2s_cfg;
+<<<<<<<
+ struct q6afe_tdm_cfg tdm;
+};
+
+struct q6afe_port;
+=======
+};
+
+struct q6afe_port;
+void q6afe_set_dai_data(struct device *dev, void *data);
+void *q6afe_get_dai_data(struct device *dev);
+
+int q6afe_dai_dev_probe(struct device *dev);
+int q6afe_dai_dev_remove(struct device *dev);
+>>>>>>>
+
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id);
+int q6afe_port_start(struct q6afe_port *port);
+int q6afe_port_stop(struct q6afe_port *port);
+void q6afe_port_put(struct q6afe_port *port);
+int q6afe_get_port_id(int index);
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg);
+<<<<<<<
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
+void q6afe_tdm_port_prepare(struct q6afe_port *port, struct q6afe_tdm_cfg *cfg);
+=======
+void q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
+>>>>>>>
+
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir);
+#endif /* __Q6AFE_H__ */
diff --git a/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/postimage b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/postimage
new file mode 100644
index 0000000..593f66b
--- /dev/null
+++ b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/postimage
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/bitops.h>
+#include <linux/component.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+#include <sound/asound.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+#include "q6asm.h"
+#include "q6adm.h"
+#include "q6routing.h"
+
+#define DRV_NAME "q6routing-component"
+
+#define Q6ROUTING_RX_MIXERS(id) \
+ SOC_SINGLE_EXT("MultiMedia1", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia2", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia3", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia4", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia5", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia6", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia7", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia8", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer),
+
+#define Q6ROUTING_RX_DAPM_ROUTE(mix_name, s) \
+ { mix_name, "MultiMedia1", "MM_DL1" }, \
+ { mix_name, "MultiMedia2", "MM_DL2" }, \
+ { mix_name, "MultiMedia3", "MM_DL3" }, \
+ { mix_name, "MultiMedia4", "MM_DL4" }, \
+ { mix_name, "MultiMedia5", "MM_DL5" }, \
+ { mix_name, "MultiMedia6", "MM_DL6" }, \
+ { mix_name, "MultiMedia7", "MM_DL7" }, \
+ { mix_name, "MultiMedia8", "MM_DL8" }, \
+ { s, NULL, mix_name }
+
+#define Q6ROUTING_TX_DAPM_ROUTE(mix_name) \
+ { mix_name, "PRI_MI2S_TX", "PRI_MI2S_TX" }, \
+ { mix_name, "SEC_MI2S_TX", "SEC_MI2S_TX" }, \
+ { mix_name, "QUAT_MI2S_TX", "QUAT_MI2S_TX" }, \
+ { mix_name, "TERT_MI2S_TX", "TERT_MI2S_TX" }, \
+ { mix_name, "PRIMARY_TDM_TX_0", "PRIMARY_TDM_TX_0"}, \
+ { mix_name, "PRIMARY_TDM_TX_1", "PRIMARY_TDM_TX_1"}, \
+ { mix_name, "PRIMARY_TDM_TX_2", "PRIMARY_TDM_TX_2"}, \
+ { mix_name, "PRIMARY_TDM_TX_3", "PRIMARY_TDM_TX_3"}, \
+ { mix_name, "PRIMARY_TDM_TX_4", "PRIMARY_TDM_TX_4"}, \
+ { mix_name, "PRIMARY_TDM_TX_5", "PRIMARY_TDM_TX_5"}, \
+ { mix_name, "PRIMARY_TDM_TX_6", "PRIMARY_TDM_TX_6"}, \
+ { mix_name, "PRIMARY_TDM_TX_7", "PRIMARY_TDM_TX_7"}, \
+ { mix_name, "SEC_TDM_TX_0", "SEC_TDM_TX_0"}, \
+ { mix_name, "SEC_TDM_TX_1", "SEC_TDM_TX_1"}, \
+ { mix_name, "SEC_TDM_TX_2", "SEC_TDM_TX_2"}, \
+ { mix_name, "SEC_TDM_TX_3", "SEC_TDM_TX_3"}, \
+ { mix_name, "SEC_TDM_TX_4", "SEC_TDM_TX_4"}, \
+ { mix_name, "SEC_TDM_TX_5", "SEC_TDM_TX_5"}, \
+ { mix_name, "SEC_TDM_TX_6", "SEC_TDM_TX_6"}, \
+ { mix_name, "SEC_TDM_TX_7", "SEC_TDM_TX_7"}, \
+ { mix_name, "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, \
+ { mix_name, "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, \
+ { mix_name, "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, \
+ { mix_name, "TERT_TDM_TX_3", "TERT_TDM_TX_3"}, \
+ { mix_name, "TERT_TDM_TX_4", "TERT_TDM_TX_4"}, \
+ { mix_name, "TERT_TDM_TX_5", "TERT_TDM_TX_5"}, \
+ { mix_name, "TERT_TDM_TX_6", "TERT_TDM_TX_6"}, \
+ { mix_name, "TERT_TDM_TX_7", "TERT_TDM_TX_7"}, \
+ { mix_name, "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"}, \
+ { mix_name, "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"}, \
+ { mix_name, "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"}, \
+ { mix_name, "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"}, \
+ { mix_name, "QUAT_TDM_TX_4", "QUAT_TDM_TX_4"}, \
+ { mix_name, "QUAT_TDM_TX_5", "QUAT_TDM_TX_5"}, \
+ { mix_name, "QUAT_TDM_TX_6", "QUAT_TDM_TX_6"}, \
+ { mix_name, "QUAT_TDM_TX_7", "QUAT_TDM_TX_7"}, \
+ { mix_name, "QUIN_TDM_TX_0", "QUIN_TDM_TX_0"}, \
+ { mix_name, "QUIN_TDM_TX_1", "QUIN_TDM_TX_1"}, \
+ { mix_name, "QUIN_TDM_TX_2", "QUIN_TDM_TX_2"}, \
+ { mix_name, "QUIN_TDM_TX_3", "QUIN_TDM_TX_3"}, \
+ { mix_name, "QUIN_TDM_TX_4", "QUIN_TDM_TX_4"}, \
+ { mix_name, "QUIN_TDM_TX_5", "QUIN_TDM_TX_5"}, \
+ { mix_name, "QUIN_TDM_TX_6", "QUIN_TDM_TX_6"}, \
+ { mix_name, "QUIN_TDM_TX_7", "QUIN_TDM_TX_7"}
+
+#define Q6ROUTING_TX_MIXERS(id) \
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_0", PRIMARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_1", PRIMARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_2", PRIMARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_3", PRIMARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_4", PRIMARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_5", PRIMARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_6", PRIMARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_7", PRIMARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", SECONDARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", SECONDARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", SECONDARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", SECONDARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_4", SECONDARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_5", SECONDARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_6", SECONDARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_7", SECONDARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", TERTIARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", TERTIARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", TERTIARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", TERTIARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_4", TERTIARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_5", TERTIARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_6", TERTIARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_7", TERTIARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", QUATERNARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", QUATERNARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", QUATERNARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", QUATERNARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_4", QUATERNARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_5", QUATERNARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_6", QUATERNARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_7", QUATERNARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_0", QUINARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_1", QUINARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_2", QUINARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_3", QUINARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_4", QUINARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_5", QUINARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_6", QUINARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_7", QUINARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer),
+
+struct session_data {
+ int state;
+ int port_id;
+ int path_type;
+ int app_type;
+ int acdb_id;
+ int sample_rate;
+ int bits_per_sample;
+ int channels;
+ int perf_mode;
+ int numcopps;
+ int fedai_id;
+ unsigned long copp_map;
+ struct q6copp *copps[MAX_COPPS_PER_PORT];
+};
+
+struct msm_routing_data {
+ struct session_data sessions[MAX_SESSIONS];
+ struct session_data port_data[AFE_MAX_PORTS];
+ struct device *dev;
+ struct mutex lock;
+};
+
+static struct msm_routing_data *routing_data;
+
+/**
+ * q6routing_stream_open() - Register a new stream for route setup
+ *
+ * @fedai_id: Frontend dai id.
+ * @perf_mode: Performance mode.
+ * @stream_id: ASM stream id to map.
+ * @stream_type: Direction of stream
+ *
+ * Return: Will be a negative on error or a zero on success.
+ */
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type)
+{
+ int j, topology, num_copps = 0;
+ struct route_payload payload;
+ struct q6copp *copp;
+ int copp_idx;
+ struct session_data *session, *pdata;
+
+ if (!routing_data) {
+ pr_err("Routing driver not yet ready\n");
+ return -EINVAL;
+ }
+
+ session = &routing_data->sessions[stream_id - 1];
+ pdata = &routing_data->port_data[session->port_id];
+
+ mutex_lock(&routing_data->lock);
+ session->fedai_id = fedai_id;
+
+ session->path_type = pdata->path_type;
+ session->sample_rate = pdata->sample_rate;
+ session->channels = pdata->channels;
+ session->bits_per_sample = pdata->bits_per_sample;
+
+ payload.num_copps = 0; /* only RX needs to use payload */
+ topology = NULL_COPP_TOPOLOGY;
+ copp = q6adm_open(routing_data->dev, session->port_id,
+ session->path_type, session->sample_rate,
+ session->channels, topology, perf_mode,
+ session->bits_per_sample, 0, 0);
+
+ if (!copp) {
+ mutex_unlock(&routing_data->lock);
+ return -EINVAL;
+ }
+
+ copp_idx = q6adm_get_copp_id(copp);
+ set_bit(copp_idx, &session->copp_map);
+ session->copps[copp_idx] = copp;
+
+ for_each_set_bit(j, &session->copp_map, MAX_COPPS_PER_PORT) {
+ payload.port_id[num_copps] = session->port_id;
+ payload.copp_idx[num_copps] = j;
+ num_copps++;
+ }
+
+ if (num_copps) {
+ payload.num_copps = num_copps;
+ payload.session_id = stream_id;
+ q6adm_matrix_map(routing_data->dev, session->path_type,
+ payload, perf_mode);
+ }
+ mutex_unlock(&routing_data->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_open);
+
+static struct session_data *get_session_from_id(struct msm_routing_data *data,
+ int fedai_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++) {
+ if (fedai_id == data->sessions[i].fedai_id)
+ return &data->sessions[i];
+ }
+
+ return NULL;
+}
+/**
+ * q6routing_stream_close() - Deregister a stream
+ *
+ * @fedai_id: Frontend dai id.
+ * @stream_type: Direction of stream
+ *
+ * Return: Will be a negative on error or a zero on success.
+ */
+void q6routing_stream_close(int fedai_id, int stream_type)
+{
+ struct session_data *session;
+ int idx;
+
+ session = get_session_from_id(routing_data, fedai_id);
+ if (!session)
+ return;
+
+ for_each_set_bit(idx, &session->copp_map, MAX_COPPS_PER_PORT) {
+ if (session->copps[idx]) {
+ q6adm_close(routing_data->dev, session->copps[idx]);
+ session->copps[idx] = NULL;
+ }
+ }
+
+ session->fedai_id = -1;
+ session->copp_map = 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_close);
+
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int session_id = mc->shift;
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *priv = dev_get_drvdata(c->dev);
+ struct session_data *session = &priv->sessions[session_id];
+
+ if (session->port_id == mc->reg)
+ ucontrol->value.integer.value[0] = 1;
+ else
+ ucontrol->value.integer.value[0] = 0;
+
+ return 0;
+}
+
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_dapm_update *update = NULL;
+ int be_id = mc->reg;
+ int session_id = mc->shift;
+ struct session_data *session = &data->sessions[session_id];
+
+ if (ucontrol->value.integer.value[0]) {
+ session->port_id = be_id;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+ } else {
+ session->port_id = -1;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+ }
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(HDMI_RX) };
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_0_RX) };
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_1_RX) };
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_2_RX) };
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_3_RX) };
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_4_RX) };
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_5_RX) };
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_6_RX) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_7) };
+
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA1) };
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA2) };
+
+static const struct snd_kcontrol_new mmul3_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA3) };
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA4) };
+
+static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA5) };
+
+static const struct snd_kcontrol_new mmul6_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA6) };
+
+static const struct snd_kcontrol_new mmul7_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA7) };
+
+static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA8) };
+
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+ /* Frontend AIF */
+ SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL7", "MultiMedia7 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
+
+ /* Mixer definitions */
+ SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+ hdmi_mixer_controls,
+ ARRAY_SIZE(hdmi_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_1_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_2_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_2_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_2_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_3_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_4_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_5_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_5_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_5_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_6_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_6_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRI_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ primary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(primary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ secondary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(secondary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quaternary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tertiary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_7_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia3 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul3_mixer_controls, ARRAY_SIZE(mmul3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia6 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia7 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul7_mixer_controls, ARRAY_SIZE(mmul7_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
+
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ Q6ROUTING_RX_DAPM_ROUTE("HDMI Mixer", "HDMI_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_0_RX Audio Mixer", "SLIMBUS_0_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_1_RX Audio Mixer", "SLIMBUS_1_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_2_RX Audio Mixer", "SLIMBUS_2_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_3_RX Audio Mixer", "SLIMBUS_3_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_4_RX Audio Mixer", "SLIMBUS_4_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_5_RX Audio Mixer", "SLIMBUS_5_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_6_RX Audio Mixer", "SLIMBUS_6_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_MI2S_RX Audio Mixer", "QUAT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_MI2S_RX Audio Mixer", "TERT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_MI2S_RX Audio Mixer", "SEC_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRI_MI2S_RX Audio Mixer", "PRI_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_0 Audio Mixer",
+ "PRIMARY_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_1 Audio Mixer",
+ "PRIMARY_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_2 Audio Mixer",
+ "PRIMARY_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_3 Audio Mixer",
+ "PRIMARY_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_4 Audio Mixer",
+ "PRIMARY_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_5 Audio Mixer",
+ "PRIMARY_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_6 Audio Mixer",
+ "PRIMARY_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_7 Audio Mixer",
+ "PRIMARY_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_0 Audio Mixer", "SEC_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_1 Audio Mixer", "SEC_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_2 Audio Mixer", "SEC_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_3 Audio Mixer", "SEC_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_4 Audio Mixer", "SEC_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_5 Audio Mixer", "SEC_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_6 Audio Mixer", "SEC_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_7 Audio Mixer", "SEC_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_0 Audio Mixer", "TERT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_1 Audio Mixer", "TERT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_2 Audio Mixer", "TERT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_3 Audio Mixer", "TERT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_4 Audio Mixer", "TERT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_5 Audio Mixer", "TERT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_6 Audio Mixer", "TERT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_7 Audio Mixer", "TERT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_0 Audio Mixer", "QUAT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_1 Audio Mixer", "QUAT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_2 Audio Mixer", "QUAT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_3 Audio Mixer", "QUAT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_4 Audio Mixer", "QUAT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_5 Audio Mixer", "QUAT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_6 Audio Mixer", "QUAT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_7 Audio Mixer", "QUAT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_0 Audio Mixer", "QUIN_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_1 Audio Mixer", "QUIN_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_2 Audio Mixer", "QUIN_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_3 Audio Mixer", "QUIN_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_4 Audio Mixer", "QUIN_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_5 Audio Mixer", "QUIN_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_6 Audio Mixer", "QUIN_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_7 Audio Mixer", "QUIN_TDM_RX_7"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia1 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia2 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia3 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia4 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia5 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia6 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia7 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia8 Mixer"),
+
+ {"MM_UL1", NULL, "MultiMedia1 Mixer"},
+ {"MM_UL2", NULL, "MultiMedia2 Mixer"},
+ {"MM_UL3", NULL, "MultiMedia3 Mixer"},
+ {"MM_UL4", NULL, "MultiMedia4 Mixer"},
+ {"MM_UL5", NULL, "MultiMedia5 Mixer"},
+ {"MM_UL6", NULL, "MultiMedia6 Mixer"},
+ {"MM_UL7", NULL, "MultiMedia7 Mixer"},
+ {"MM_UL8", NULL, "MultiMedia8 Mixer"},
+};
+
+static int routing_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ unsigned int be_id = rtd->cpu_dai->id;
+ struct session_data *session;
+ int path_type;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ path_type = ADM_PATH_PLAYBACK;
+ else
+ path_type = ADM_PATH_LIVE_REC;
+
+ if (be_id >= AFE_MAX_PORTS)
+ return -EINVAL;
+
+ session = &data->port_data[be_id];
+
+ mutex_lock(&data->lock);
+
+ session->path_type = path_type;
+ session->sample_rate = params_rate(params);
+ session->channels = params_channels(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ session->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ session->bits_per_sample = 24;
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+static struct snd_pcm_ops q6pcm_routing_ops = {
+ .hw_params = routing_hw_params,
+};
+
+static int msm_routing_probe(struct snd_soc_component *c)
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++)
+ routing_data->sessions[i].port_id = -1;
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver msm_soc_routing_component = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+ .name = DRV_NAME,
+ .dapm_widgets = msm_qdsp6_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+};
+
+static int q6routing_dai_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ routing_data = kzalloc(sizeof(*routing_data), GFP_KERNEL);
+ if (!routing_data)
+ return -ENOMEM;
+
+ routing_data->dev = dev;
+
+ mutex_init(&routing_data->lock);
+ dev_set_drvdata(dev, routing_data);
+
+ return snd_soc_register_component(dev, &msm_soc_routing_component,
+ NULL, 0);
+}
+
+static void q6routing_dai_unbind(struct device *dev, struct device *master,
+ void *d)
+{
+ struct msm_routing_data *data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(data);
+
+ routing_data = NULL;
+}
+
+static const struct component_ops q6routing_dai_comp_ops = {
+ .bind = q6routing_dai_bind,
+ .unbind = q6routing_dai_unbind,
+};
+
+static int q6pcm_routing_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6routing_dai_comp_ops);
+}
+
+static int q6pcm_routing_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6routing_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6pcm_routing_platform_driver = {
+ .driver = {
+ .name = "q6routing",
+ },
+ .probe = q6pcm_routing_probe,
+ .remove = q6pcm_routing_remove,
+};
+module_platform_driver(q6pcm_routing_platform_driver);
+
+MODULE_DESCRIPTION("Q6 Routing platform");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/preimage b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/preimage
new file mode 100644
index 0000000..e191103
--- /dev/null
+++ b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/preimage
@@ -0,0 +1,1583 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+<<<<<<<
+#include <linux/bitops.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/bitops.h>
+#include <linux/component.h>
+>>>>>>>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+#include <sound/asound.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+#include "q6asm.h"
+#include "q6adm.h"
+#include "q6routing.h"
+
+<<<<<<<
+=======
+#define DRV_NAME "q6routing-component"
+
+#define Q6ROUTING_RX_MIXERS(id) \
+ SOC_SINGLE_EXT("MultiMedia1", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia2", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia3", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia4", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia5", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia6", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia7", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia8", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer),
+
+#define Q6ROUTING_RX_DAPM_ROUTE(mix_name, s) \
+ { mix_name, "MultiMedia1", "MM_DL1" }, \
+ { mix_name, "MultiMedia2", "MM_DL2" }, \
+ { mix_name, "MultiMedia3", "MM_DL3" }, \
+ { mix_name, "MultiMedia4", "MM_DL4" }, \
+ { mix_name, "MultiMedia5", "MM_DL5" }, \
+ { mix_name, "MultiMedia6", "MM_DL6" }, \
+ { mix_name, "MultiMedia7", "MM_DL7" }, \
+ { mix_name, "MultiMedia8", "MM_DL8" }, \
+ { s, NULL, mix_name }
+
+#define Q6ROUTING_TX_DAPM_ROUTE(mix_name) \
+ { mix_name, "PRI_MI2S_TX", "PRI_MI2S_TX" }, \
+ { mix_name, "SEC_MI2S_TX", "SEC_MI2S_TX" }, \
+ { mix_name, "QUAT_MI2S_TX", "QUAT_MI2S_TX" }, \
+ { mix_name, "TERT_MI2S_TX", "TERT_MI2S_TX" }, \
+ { mix_name, "PRIMARY_TDM_TX_0", "PRIMARY_TDM_TX_0"}, \
+ { mix_name, "PRIMARY_TDM_TX_1", "PRIMARY_TDM_TX_1"}, \
+ { mix_name, "PRIMARY_TDM_TX_2", "PRIMARY_TDM_TX_2"}, \
+ { mix_name, "PRIMARY_TDM_TX_3", "PRIMARY_TDM_TX_3"}, \
+ { mix_name, "PRIMARY_TDM_TX_4", "PRIMARY_TDM_TX_4"}, \
+ { mix_name, "PRIMARY_TDM_TX_5", "PRIMARY_TDM_TX_5"}, \
+ { mix_name, "PRIMARY_TDM_TX_6", "PRIMARY_TDM_TX_6"}, \
+ { mix_name, "PRIMARY_TDM_TX_7", "PRIMARY_TDM_TX_7"}, \
+ { mix_name, "SEC_TDM_TX_0", "SEC_TDM_TX_0"}, \
+ { mix_name, "SEC_TDM_TX_1", "SEC_TDM_TX_1"}, \
+ { mix_name, "SEC_TDM_TX_2", "SEC_TDM_TX_2"}, \
+ { mix_name, "SEC_TDM_TX_3", "SEC_TDM_TX_3"}, \
+ { mix_name, "SEC_TDM_TX_4", "SEC_TDM_TX_4"}, \
+ { mix_name, "SEC_TDM_TX_5", "SEC_TDM_TX_5"}, \
+ { mix_name, "SEC_TDM_TX_6", "SEC_TDM_TX_6"}, \
+ { mix_name, "SEC_TDM_TX_7", "SEC_TDM_TX_7"}, \
+ { mix_name, "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, \
+ { mix_name, "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, \
+ { mix_name, "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, \
+ { mix_name, "TERT_TDM_TX_3", "TERT_TDM_TX_3"}, \
+ { mix_name, "TERT_TDM_TX_4", "TERT_TDM_TX_4"}, \
+ { mix_name, "TERT_TDM_TX_5", "TERT_TDM_TX_5"}, \
+ { mix_name, "TERT_TDM_TX_6", "TERT_TDM_TX_6"}, \
+ { mix_name, "TERT_TDM_TX_7", "TERT_TDM_TX_7"}, \
+ { mix_name, "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"}, \
+ { mix_name, "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"}, \
+ { mix_name, "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"}, \
+ { mix_name, "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"}, \
+ { mix_name, "QUAT_TDM_TX_4", "QUAT_TDM_TX_4"}, \
+ { mix_name, "QUAT_TDM_TX_5", "QUAT_TDM_TX_5"}, \
+ { mix_name, "QUAT_TDM_TX_6", "QUAT_TDM_TX_6"}, \
+ { mix_name, "QUAT_TDM_TX_7", "QUAT_TDM_TX_7"}, \
+ { mix_name, "QUIN_TDM_TX_0", "QUIN_TDM_TX_0"}, \
+ { mix_name, "QUIN_TDM_TX_1", "QUIN_TDM_TX_1"}, \
+ { mix_name, "QUIN_TDM_TX_2", "QUIN_TDM_TX_2"}, \
+ { mix_name, "QUIN_TDM_TX_3", "QUIN_TDM_TX_3"}, \
+ { mix_name, "QUIN_TDM_TX_4", "QUIN_TDM_TX_4"}, \
+ { mix_name, "QUIN_TDM_TX_5", "QUIN_TDM_TX_5"}, \
+ { mix_name, "QUIN_TDM_TX_6", "QUIN_TDM_TX_6"}, \
+ { mix_name, "QUIN_TDM_TX_7", "QUIN_TDM_TX_7"}
+
+#define Q6ROUTING_TX_MIXERS(id) \
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_0", PRIMARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_1", PRIMARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_2", PRIMARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_3", PRIMARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_4", PRIMARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_5", PRIMARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_6", PRIMARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_7", PRIMARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", SECONDARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", SECONDARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", SECONDARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", SECONDARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_4", SECONDARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_5", SECONDARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_6", SECONDARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_7", SECONDARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", TERTIARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", TERTIARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", TERTIARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", TERTIARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_4", TERTIARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_5", TERTIARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_6", TERTIARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_7", TERTIARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", QUATERNARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", QUATERNARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", QUATERNARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", QUATERNARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_4", QUATERNARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_5", QUATERNARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_6", QUATERNARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_7", QUATERNARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_0", QUINARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_1", QUINARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_2", QUINARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_3", QUINARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_4", QUINARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_5", QUINARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_6", QUINARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_7", QUINARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer),
+
+>>>>>>>
+struct session_data {
+ int state;
+ int port_id;
+ int path_type;
+ int app_type;
+ int acdb_id;
+ int sample_rate;
+ int bits_per_sample;
+ int channels;
+ int perf_mode;
+ int numcopps;
+ int fedai_id;
+ unsigned long copp_map;
+<<<<<<<
+=======
+ struct q6copp *copps[MAX_COPPS_PER_PORT];
+>>>>>>>
+};
+
+struct msm_routing_data {
+ struct session_data sessions[MAX_SESSIONS];
+ struct session_data port_data[AFE_MAX_PORTS];
+ struct device *dev;
+ struct mutex lock;
+};
+
+static struct msm_routing_data *routing_data;
+
+/**
+ * q6routing_stream_open() - Register a new stream for route setup
+ *
+ * @fedai_id: Frontend dai id.
+ * @perf_mode: Performance mode.
+ * @stream_id: ASM stream id to map.
+ * @stream_type: Direction of stream
+ *
+ * Return: Will be a negative on error or a zero on success.
+ */
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type)
+{
+ int j, topology, num_copps = 0;
+ struct route_payload payload;
+<<<<<<<
+=======
+ struct q6copp *copp;
+>>>>>>>
+ int copp_idx;
+ struct session_data *session, *pdata;
+
+ if (!routing_data) {
+ pr_err("Routing driver not yet ready\n");
+ return -EINVAL;
+ }
+
+ session = &routing_data->sessions[stream_id - 1];
+ pdata = &routing_data->port_data[session->port_id];
+
+ mutex_lock(&routing_data->lock);
+ session->fedai_id = fedai_id;
+
+ session->path_type = pdata->path_type;
+ session->sample_rate = pdata->sample_rate;
+ session->channels = pdata->channels;
+ session->bits_per_sample = pdata->bits_per_sample;
+
+ payload.num_copps = 0; /* only RX needs to use payload */
+ topology = NULL_COPP_TOPOLOGY;
+<<<<<<<
+ copp = q6adm_open(routing_data->dev, session->port_id,
+=======
+ copp_idx = q6adm_open(routing_data->dev, session->port_id,
+>>>>>>>
+ session->path_type, session->sample_rate,
+ session->channels, topology, perf_mode,
+ session->bits_per_sample, 0, 0);
+
+<<<<<<<
+ if (!copp) {
+=======
+ if (copp_idx < 0) {
+>>>>>>>
+ mutex_unlock(&routing_data->lock);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ copp_idx = q6adm_get_copp_id(copp);
+ set_bit(copp_idx, &session->copp_map);
+ session->copps[copp_idx] = copp;
+=======
+ set_bit(copp_idx, &session->copp_map);
+>>>>>>>
+
+ for_each_set_bit(j, &session->copp_map, MAX_COPPS_PER_PORT) {
+ payload.port_id[num_copps] = session->port_id;
+ payload.copp_idx[num_copps] = j;
+ num_copps++;
+ }
+
+ if (num_copps) {
+ payload.num_copps = num_copps;
+ payload.session_id = stream_id;
+ q6adm_matrix_map(routing_data->dev, session->path_type,
+ payload, perf_mode);
+ }
+ mutex_unlock(&routing_data->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_open);
+
+static struct session_data *get_session_from_id(struct msm_routing_data *data,
+ int fedai_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++) {
+ if (fedai_id == data->sessions[i].fedai_id)
+ return &data->sessions[i];
+ }
+
+ return NULL;
+}
+/**
+ * q6routing_stream_close() - Deregister a stream
+ *
+ * @fedai_id: Frontend dai id.
+ * @stream_type: Direction of stream
+ *
+ * Return: Will be a negative on error or a zero on success.
+ */
+void q6routing_stream_close(int fedai_id, int stream_type)
+{
+ struct session_data *session;
+ int idx;
+
+ session = get_session_from_id(routing_data, fedai_id);
+ if (!session)
+ return;
+
+<<<<<<<
+ for_each_set_bit(idx, &session->copp_map, MAX_COPPS_PER_PORT)
+ q6adm_close(routing_data->dev, session->port_id,
+ session->perf_mode, idx);
+=======
+ for_each_set_bit(idx, &session->copp_map, MAX_COPPS_PER_PORT) {
+ if (session->copps[idx]) {
+ q6adm_close(routing_data->dev, session->copps[idx]);
+ session->copps[idx] = NULL;
+ }
+ }
+>>>>>>>
+
+ session->fedai_id = -1;
+ session->copp_map = 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_close);
+
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int session_id = mc->shift;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *priv = dev_get_drvdata(c->dev);
+ struct session_data *session = &priv->sessions[session_id];
+
+ if (session->port_id == mc->reg)
+=======
+ struct snd_soc_platform *platform = snd_soc_dapm_to_platform(dapm);
+ struct msm_routing_data *priv = q6adm_get_routing_data(platform->dev);
+ struct session_data *session = &priv->sessions[session_id];
+
+ if (session->port_id != -1)
+>>>>>>>
+ ucontrol->value.integer.value[0] = 1;
+ else
+ ucontrol->value.integer.value[0] = 0;
+
+ return 0;
+}
+
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+=======
+ struct snd_soc_platform *platform = snd_soc_dapm_to_platform(dapm);
+ struct msm_routing_data *data = q6adm_get_routing_data(platform->dev);
+>>>>>>>
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_dapm_update *update = NULL;
+ int be_id = mc->reg;
+ int session_id = mc->shift;
+ struct session_data *session = &data->sessions[session_id];
+
+ if (ucontrol->value.integer.value[0]) {
+ session->port_id = be_id;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+ } else {
+ session->port_id = -1;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+ }
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+<<<<<<<
+ Q6ROUTING_RX_MIXERS(HDMI_RX) };
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_0_RX) };
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_1_RX) };
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_2_RX) };
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_3_RX) };
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_4_RX) };
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_5_RX) };
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_6_RX) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_7) };
+
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA1) };
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA2) };
+
+static const struct snd_kcontrol_new mmul3_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA3) };
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA4) };
+
+static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA5) };
+
+static const struct snd_kcontrol_new mmul6_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA6) };
+
+static const struct snd_kcontrol_new mmul7_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA7) };
+
+static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA8) };
+=======
+ SOC_SINGLE_EXT("MultiMedia1", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+>>>>>>>
+
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+ /* Frontend AIF */
+ SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+<<<<<<<
+ SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL7", "MultiMedia7 Capture", 0, 0, 0, 0),
+=======
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+>>>>>>>
+ SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
+
+ /* Mixer definitions */
+ SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+ hdmi_mixer_controls,
+ ARRAY_SIZE(hdmi_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_1_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_2_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_2_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_2_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_3_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_4_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_5_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_5_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_5_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_6_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_6_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRI_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ primary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(primary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ secondary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(secondary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quaternary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tertiary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)),
+<<<<<<<
+=======
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_7_mixer_controls)),
+>>>>>>>
+ SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+<<<<<<<
+ SND_SOC_DAPM_MIXER("MultiMedia3 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul3_mixer_controls, ARRAY_SIZE(mmul3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia6 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia7 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul7_mixer_controls, ARRAY_SIZE(mmul7_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
+=======
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+>>>>>>>
+
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+<<<<<<<
+ Q6ROUTING_RX_DAPM_ROUTE("HDMI Mixer", "HDMI_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_0_RX Audio Mixer", "SLIMBUS_0_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_1_RX Audio Mixer", "SLIMBUS_1_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_2_RX Audio Mixer", "SLIMBUS_2_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_3_RX Audio Mixer", "SLIMBUS_3_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_4_RX Audio Mixer", "SLIMBUS_4_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_5_RX Audio Mixer", "SLIMBUS_5_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_6_RX Audio Mixer", "SLIMBUS_6_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_MI2S_RX Audio Mixer", "QUAT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_MI2S_RX Audio Mixer", "TERT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_MI2S_RX Audio Mixer", "SEC_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRI_MI2S_RX Audio Mixer", "PRI_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_0 Audio Mixer",
+ "PRIMARY_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_1 Audio Mixer",
+ "PRIMARY_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_2 Audio Mixer",
+ "PRIMARY_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_3 Audio Mixer",
+ "PRIMARY_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_4 Audio Mixer",
+ "PRIMARY_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_5 Audio Mixer",
+ "PRIMARY_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_6 Audio Mixer",
+ "PRIMARY_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_7 Audio Mixer",
+ "PRIMARY_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_0 Audio Mixer", "SEC_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_1 Audio Mixer", "SEC_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_2 Audio Mixer", "SEC_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_3 Audio Mixer", "SEC_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_4 Audio Mixer", "SEC_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_5 Audio Mixer", "SEC_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_6 Audio Mixer", "SEC_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_7 Audio Mixer", "SEC_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_0 Audio Mixer", "TERT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_1 Audio Mixer", "TERT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_2 Audio Mixer", "TERT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_3 Audio Mixer", "TERT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_4 Audio Mixer", "TERT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_5 Audio Mixer", "TERT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_6 Audio Mixer", "TERT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_7 Audio Mixer", "TERT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_0 Audio Mixer", "QUAT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_1 Audio Mixer", "QUAT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_2 Audio Mixer", "QUAT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_3 Audio Mixer", "QUAT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_4 Audio Mixer", "QUAT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_5 Audio Mixer", "QUAT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_6 Audio Mixer", "QUAT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_7 Audio Mixer", "QUAT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_0 Audio Mixer", "QUIN_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_1 Audio Mixer", "QUIN_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_2 Audio Mixer", "QUIN_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_3 Audio Mixer", "QUIN_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_4 Audio Mixer", "QUIN_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_5 Audio Mixer", "QUIN_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_6 Audio Mixer", "QUIN_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_7 Audio Mixer", "QUIN_TDM_RX_7"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia1 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia2 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia3 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia4 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia5 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia6 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia7 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia8 Mixer"),
+
+ {"MM_UL1", NULL, "MultiMedia1 Mixer"},
+ {"MM_UL2", NULL, "MultiMedia2 Mixer"},
+ {"MM_UL3", NULL, "MultiMedia3 Mixer"},
+ {"MM_UL4", NULL, "MultiMedia4 Mixer"},
+ {"MM_UL5", NULL, "MultiMedia5 Mixer"},
+ {"MM_UL6", NULL, "MultiMedia6 Mixer"},
+ {"MM_UL7", NULL, "MultiMedia7 Mixer"},
+ {"MM_UL8", NULL, "MultiMedia8 Mixer"},
+=======
+ {"HDMI Mixer", "MultiMedia1", "MM_DL1"},
+ {"HDMI Mixer", "MultiMedia2", "MM_DL2"},
+ {"HDMI Mixer", "MultiMedia3", "MM_DL3"},
+ {"HDMI Mixer", "MultiMedia4", "MM_DL4"},
+ {"HDMI Mixer", "MultiMedia5", "MM_DL5"},
+ {"HDMI Mixer", "MultiMedia6", "MM_DL6"},
+ {"HDMI Mixer", "MultiMedia7", "MM_DL7"},
+ {"HDMI Mixer", "MultiMedia8", "MM_DL8"},
+ {"HDMI_RX", NULL, "HDMI Mixer"},
+
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
+
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Audio Mixer"},
+
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_2_RX", NULL, "SLIMBUS_2_RX Audio Mixer"},
+
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Audio Mixer"},
+
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"},
+
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_5_RX", NULL, "SLIMBUS_5_RX Audio Mixer"},
+
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
+
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"},
+
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Audio Mixer"},
+
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Audio Mixer"},
+
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Audio Mixer"},
+
+ {"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MM_UL1", NULL, "MultiMedia1 Mixer"},
+>>>>>>>
+};
+
+static int routing_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ unsigned int be_id = rtd->cpu_dai->id;
+=======
+ unsigned int be_id = rtd->cpu_dai->id;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct msm_routing_data *data = q6adm_get_routing_data(platform->dev);
+>>>>>>>
+ struct session_data *session;
+ int path_type;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ path_type = ADM_PATH_PLAYBACK;
+ else
+ path_type = ADM_PATH_LIVE_REC;
+
+	if (be_id >= AFE_MAX_PORTS)
+ return -EINVAL;
+
+ session = &data->port_data[be_id];
+
+ mutex_lock(&data->lock);
+
+ session->path_type = path_type;
+ session->sample_rate = params_rate(params);
+ session->channels = params_channels(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ session->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ session->bits_per_sample = 24;
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+static struct snd_pcm_ops q6pcm_routing_ops = {
+ .hw_params = routing_hw_params,
+};
+
+<<<<<<<
+static int msm_routing_probe(struct snd_soc_component *c)
+=======
+static int msm_routing_probe(struct snd_soc_platform *platform)
+>>>>>>>
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++)
+ routing_data->sessions[i].port_id = -1;
+
+ return 0;
+}
+
+<<<<<<<
+static const struct snd_soc_component_driver msm_soc_routing_component = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+ .name = DRV_NAME,
+ .dapm_widgets = msm_qdsp6_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+};
+
+static int q6routing_dai_bind(struct device *dev, struct device *master,
+ void *data)
+=======
+static struct snd_soc_platform_driver msm_soc_routing_platform = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+ .component_driver = {
+ .name = "q6routing-component",
+ .dapm_widgets = msm_qdsp6_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
+};
+
+int q6pcm_routing_probe(struct device *dev)
+>>>>>>>
+{
+ routing_data = kzalloc(sizeof(*routing_data), GFP_KERNEL);
+ if (!routing_data)
+ return -ENOMEM;
+
+ routing_data->dev = dev;
+
+ mutex_init(&routing_data->lock);
+<<<<<<<
+ dev_set_drvdata(dev, routing_data);
+
+ return snd_soc_register_component(dev, &msm_soc_routing_component,
+ NULL, 0);
+}
+
+static void q6routing_dai_unbind(struct device *dev, struct device *master,
+ void *d)
+{
+ struct msm_routing_data *data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(data);
+
+ routing_data = NULL;
+}
+
+static const struct component_ops q6routing_dai_comp_ops = {
+ .bind = q6routing_dai_bind,
+ .unbind = q6routing_dai_unbind,
+};
+
+static int q6pcm_routing_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6routing_dai_comp_ops);
+}
+
+static int q6pcm_routing_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6routing_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6pcm_routing_platform_driver = {
+ .driver = {
+ .name = "q6routing",
+ },
+ .probe = q6pcm_routing_probe,
+ .remove = q6pcm_routing_remove,
+};
+module_platform_driver(q6pcm_routing_platform_driver);
+
+=======
+ q6adm_set_routing_data(dev, routing_data);
+
+ return devm_snd_soc_register_platform(dev,
+ &msm_soc_routing_platform);
+}
+EXPORT_SYMBOL_GPL(q6pcm_routing_probe);
+
+int q6pcm_routing_remove(struct device *dev)
+{
+ kfree(routing_data);
+
+ routing_data = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6pcm_routing_remove);
+>>>>>>>
+MODULE_DESCRIPTION("Q6 Routing platform");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/thisimage b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/thisimage
new file mode 100644
index 0000000..e191103
--- /dev/null
+++ b/rr-cache/ccea6594d754c8948c1cb70fbd1a0b1c81c949a2/thisimage
@@ -0,0 +1,1583 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+<<<<<<<
+#include <linux/bitops.h>
+=======
+#include <linux/of_platform.h>
+#include <linux/bitops.h>
+#include <linux/component.h>
+>>>>>>>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+#include <sound/asound.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+#include "q6asm.h"
+#include "q6adm.h"
+#include "q6routing.h"
+
+<<<<<<<
+=======
+#define DRV_NAME "q6routing-component"
+
+#define Q6ROUTING_RX_MIXERS(id) \
+ SOC_SINGLE_EXT("MultiMedia1", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia2", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia3", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia4", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia5", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia6", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia7", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("MultiMedia8", id, \
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,\
+ msm_routing_put_audio_mixer),
+
+#define Q6ROUTING_RX_DAPM_ROUTE(mix_name, s) \
+ { mix_name, "MultiMedia1", "MM_DL1" }, \
+ { mix_name, "MultiMedia2", "MM_DL2" }, \
+ { mix_name, "MultiMedia3", "MM_DL3" }, \
+ { mix_name, "MultiMedia4", "MM_DL4" }, \
+ { mix_name, "MultiMedia5", "MM_DL5" }, \
+ { mix_name, "MultiMedia6", "MM_DL6" }, \
+ { mix_name, "MultiMedia7", "MM_DL7" }, \
+ { mix_name, "MultiMedia8", "MM_DL8" }, \
+ { s, NULL, mix_name }
+
+#define Q6ROUTING_TX_DAPM_ROUTE(mix_name) \
+ { mix_name, "PRI_MI2S_TX", "PRI_MI2S_TX" }, \
+ { mix_name, "SEC_MI2S_TX", "SEC_MI2S_TX" }, \
+ { mix_name, "QUAT_MI2S_TX", "QUAT_MI2S_TX" }, \
+ { mix_name, "TERT_MI2S_TX", "TERT_MI2S_TX" }, \
+ { mix_name, "PRIMARY_TDM_TX_0", "PRIMARY_TDM_TX_0"}, \
+ { mix_name, "PRIMARY_TDM_TX_1", "PRIMARY_TDM_TX_1"}, \
+ { mix_name, "PRIMARY_TDM_TX_2", "PRIMARY_TDM_TX_2"}, \
+ { mix_name, "PRIMARY_TDM_TX_3", "PRIMARY_TDM_TX_3"}, \
+ { mix_name, "PRIMARY_TDM_TX_4", "PRIMARY_TDM_TX_4"}, \
+ { mix_name, "PRIMARY_TDM_TX_5", "PRIMARY_TDM_TX_5"}, \
+ { mix_name, "PRIMARY_TDM_TX_6", "PRIMARY_TDM_TX_6"}, \
+ { mix_name, "PRIMARY_TDM_TX_7", "PRIMARY_TDM_TX_7"}, \
+ { mix_name, "SEC_TDM_TX_0", "SEC_TDM_TX_0"}, \
+ { mix_name, "SEC_TDM_TX_1", "SEC_TDM_TX_1"}, \
+ { mix_name, "SEC_TDM_TX_2", "SEC_TDM_TX_2"}, \
+ { mix_name, "SEC_TDM_TX_3", "SEC_TDM_TX_3"}, \
+ { mix_name, "SEC_TDM_TX_4", "SEC_TDM_TX_4"}, \
+ { mix_name, "SEC_TDM_TX_5", "SEC_TDM_TX_5"}, \
+ { mix_name, "SEC_TDM_TX_6", "SEC_TDM_TX_6"}, \
+ { mix_name, "SEC_TDM_TX_7", "SEC_TDM_TX_7"}, \
+ { mix_name, "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, \
+ { mix_name, "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, \
+ { mix_name, "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, \
+ { mix_name, "TERT_TDM_TX_3", "TERT_TDM_TX_3"}, \
+ { mix_name, "TERT_TDM_TX_4", "TERT_TDM_TX_4"}, \
+ { mix_name, "TERT_TDM_TX_5", "TERT_TDM_TX_5"}, \
+ { mix_name, "TERT_TDM_TX_6", "TERT_TDM_TX_6"}, \
+ { mix_name, "TERT_TDM_TX_7", "TERT_TDM_TX_7"}, \
+ { mix_name, "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"}, \
+ { mix_name, "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"}, \
+ { mix_name, "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"}, \
+ { mix_name, "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"}, \
+ { mix_name, "QUAT_TDM_TX_4", "QUAT_TDM_TX_4"}, \
+ { mix_name, "QUAT_TDM_TX_5", "QUAT_TDM_TX_5"}, \
+ { mix_name, "QUAT_TDM_TX_6", "QUAT_TDM_TX_6"}, \
+ { mix_name, "QUAT_TDM_TX_7", "QUAT_TDM_TX_7"}, \
+ { mix_name, "QUIN_TDM_TX_0", "QUIN_TDM_TX_0"}, \
+ { mix_name, "QUIN_TDM_TX_1", "QUIN_TDM_TX_1"}, \
+ { mix_name, "QUIN_TDM_TX_2", "QUIN_TDM_TX_2"}, \
+ { mix_name, "QUIN_TDM_TX_3", "QUIN_TDM_TX_3"}, \
+ { mix_name, "QUIN_TDM_TX_4", "QUIN_TDM_TX_4"}, \
+ { mix_name, "QUIN_TDM_TX_5", "QUIN_TDM_TX_5"}, \
+ { mix_name, "QUIN_TDM_TX_6", "QUIN_TDM_TX_6"}, \
+ { mix_name, "QUIN_TDM_TX_7", "QUIN_TDM_TX_7"}
+
+#define Q6ROUTING_TX_MIXERS(id) \
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_0", PRIMARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_1", PRIMARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_2", PRIMARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_3", PRIMARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_4", PRIMARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_5", PRIMARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_6", PRIMARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("PRIMARY_TDM_TX_7", PRIMARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", SECONDARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", SECONDARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", SECONDARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", SECONDARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_4", SECONDARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_5", SECONDARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_6", SECONDARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SEC_TDM_TX_7", SECONDARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", TERTIARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", TERTIARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", TERTIARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", TERTIARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_4", TERTIARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_5", TERTIARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_6", TERTIARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("TERT_TDM_TX_7", TERTIARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", QUATERNARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", QUATERNARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", QUATERNARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", QUATERNARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_4", QUATERNARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_5", QUATERNARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_6", QUATERNARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUAT_TDM_TX_7", QUATERNARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_0", QUINARY_TDM_TX_0, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_1", QUINARY_TDM_TX_1, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_2", QUINARY_TDM_TX_2, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_3", QUINARY_TDM_TX_3, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_4", QUINARY_TDM_TX_4, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_5", QUINARY_TDM_TX_5, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_6", QUINARY_TDM_TX_6, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_TDM_TX_7", QUINARY_TDM_TX_7, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer),
+
+>>>>>>>
+struct session_data {
+ int state;
+ int port_id;
+ int path_type;
+ int app_type;
+ int acdb_id;
+ int sample_rate;
+ int bits_per_sample;
+ int channels;
+ int perf_mode;
+ int numcopps;
+ int fedai_id;
+ unsigned long copp_map;
+<<<<<<<
+=======
+ struct q6copp *copps[MAX_COPPS_PER_PORT];
+>>>>>>>
+};
+
+struct msm_routing_data {
+ struct session_data sessions[MAX_SESSIONS];
+ struct session_data port_data[AFE_MAX_PORTS];
+ struct device *dev;
+ struct mutex lock;
+};
+
+static struct msm_routing_data *routing_data;
+
+/**
+ * q6routing_stream_open() - Register a new stream for route setup
+ *
+ * @fedai_id: Frontend dai id.
+ * @perf_mode: Performance mode.
+ * @stream_id: ASM stream id to map.
+ * @stream_type: Direction of stream.
+ *
+ * Return: Zero on success, or a negative error code on failure.
+ */
+int q6routing_stream_open(int fedai_id, int perf_mode,
+ int stream_id, int stream_type)
+{
+ int j, topology, num_copps = 0;
+ struct route_payload payload;
+<<<<<<<
+=======
+ struct q6copp *copp;
+>>>>>>>
+ int copp_idx;
+ struct session_data *session, *pdata;
+
+ if (!routing_data) {
+ pr_err("Routing driver not yet ready\n");
+ return -EINVAL;
+ }
+
+ session = &routing_data->sessions[stream_id - 1];
+ pdata = &routing_data->port_data[session->port_id];
+
+ mutex_lock(&routing_data->lock);
+ session->fedai_id = fedai_id;
+
+ session->path_type = pdata->path_type;
+ session->sample_rate = pdata->sample_rate;
+ session->channels = pdata->channels;
+ session->bits_per_sample = pdata->bits_per_sample;
+
+ payload.num_copps = 0; /* only RX needs to use payload */
+ topology = NULL_COPP_TOPOLOGY;
+<<<<<<<
+ copp = q6adm_open(routing_data->dev, session->port_id,
+=======
+ copp_idx = q6adm_open(routing_data->dev, session->port_id,
+>>>>>>>
+ session->path_type, session->sample_rate,
+ session->channels, topology, perf_mode,
+ session->bits_per_sample, 0, 0);
+
+<<<<<<<
+ if (!copp) {
+=======
+ if (copp_idx < 0) {
+>>>>>>>
+ mutex_unlock(&routing_data->lock);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ copp_idx = q6adm_get_copp_id(copp);
+ set_bit(copp_idx, &session->copp_map);
+ session->copps[copp_idx] = copp;
+=======
+ set_bit(copp_idx, &session->copp_map);
+>>>>>>>
+
+ for_each_set_bit(j, &session->copp_map, MAX_COPPS_PER_PORT) {
+ payload.port_id[num_copps] = session->port_id;
+ payload.copp_idx[num_copps] = j;
+ num_copps++;
+ }
+
+ if (num_copps) {
+ payload.num_copps = num_copps;
+ payload.session_id = stream_id;
+ q6adm_matrix_map(routing_data->dev, session->path_type,
+ payload, perf_mode);
+ }
+ mutex_unlock(&routing_data->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_open);
+
+static struct session_data *get_session_from_id(struct msm_routing_data *data,
+ int fedai_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++) {
+ if (fedai_id == data->sessions[i].fedai_id)
+ return &data->sessions[i];
+ }
+
+ return NULL;
+}
+/**
+ * q6routing_stream_close() - Deregister a stream
+ *
+ * @fedai_id: Frontend dai id.
+ * @stream_type: Direction of stream.
+ */
+void q6routing_stream_close(int fedai_id, int stream_type)
+{
+ struct session_data *session;
+ int idx;
+
+ session = get_session_from_id(routing_data, fedai_id);
+ if (!session)
+ return;
+
+<<<<<<<
+ for_each_set_bit(idx, &session->copp_map, MAX_COPPS_PER_PORT)
+ q6adm_close(routing_data->dev, session->port_id,
+ session->perf_mode, idx);
+=======
+ for_each_set_bit(idx, &session->copp_map, MAX_COPPS_PER_PORT) {
+ if (session->copps[idx]) {
+ q6adm_close(routing_data->dev, session->copps[idx]);
+ session->copps[idx] = NULL;
+ }
+ }
+>>>>>>>
+
+ session->fedai_id = -1;
+ session->copp_map = 0;
+}
+EXPORT_SYMBOL_GPL(q6routing_stream_close);
+
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int session_id = mc->shift;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *priv = dev_get_drvdata(c->dev);
+ struct session_data *session = &priv->sessions[session_id];
+
+ if (session->port_id == mc->reg)
+=======
+ struct snd_soc_platform *platform = snd_soc_dapm_to_platform(dapm);
+ struct msm_routing_data *priv = q6adm_get_routing_data(platform->dev);
+ struct session_data *session = &priv->sessions[session_id];
+
+ if (session->port_id != -1)
+>>>>>>>
+ ucontrol->value.integer.value[0] = 1;
+ else
+ ucontrol->value.integer.value[0] = 0;
+
+ return 0;
+}
+
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_dapm_to_component(dapm);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+=======
+ struct snd_soc_platform *platform = snd_soc_dapm_to_platform(dapm);
+ struct msm_routing_data *data = q6adm_get_routing_data(platform->dev);
+>>>>>>>
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_dapm_update *update = NULL;
+ int be_id = mc->reg;
+ int session_id = mc->shift;
+ struct session_data *session = &data->sessions[session_id];
+
+ if (ucontrol->value.integer.value[0]) {
+ session->port_id = be_id;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+ } else {
+ session->port_id = -1;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+ }
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+<<<<<<<
+ Q6ROUTING_RX_MIXERS(HDMI_RX) };
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_MI2S_RX) };
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_0_RX) };
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_1_RX) };
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_2_RX) };
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_3_RX) };
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_4_RX) };
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_5_RX) };
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SLIMBUS_6_RX) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new pri_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(PRIMARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new sec_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(SECONDARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new tert_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(TERTIARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quat_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUATERNARY_TDM_RX_7) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_0_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_0) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_1_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_1) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_2_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_2) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_3_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_3) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_4_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_4) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_5_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_5) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_6_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_6) };
+
+static const struct snd_kcontrol_new quin_tdm_rx_7_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_TDM_RX_7) };
+
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA1) };
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA2) };
+
+static const struct snd_kcontrol_new mmul3_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA3) };
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA4) };
+
+static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA5) };
+
+static const struct snd_kcontrol_new mmul6_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA6) };
+
+static const struct snd_kcontrol_new mmul7_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA7) };
+
+static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+ Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA8) };
+=======
+ SOC_SINGLE_EXT("MultiMedia1", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", AFE_PORT_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0,
+ msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", PRIMARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", PRIMARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+>>>>>>>
+
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+ /* Frontend AIF */
+ SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+<<<<<<<
+ SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL7", "MultiMedia7 Capture", 0, 0, 0, 0),
+=======
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+>>>>>>>
+ SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
+
+ /* Mixer definitions */
+ SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+ hdmi_mixer_controls,
+ ARRAY_SIZE(hdmi_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_1_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_2_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_2_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_2_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_3_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_4_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_5_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_5_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_5_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_6_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_6_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRI_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ primary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(primary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ secondary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(secondary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quaternary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tertiary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)),
+<<<<<<<
+=======
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("PRIMARY_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ pri_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(pri_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(sec_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quat_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quat_tdm_rx_7_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_0_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_0_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_1_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_2_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_3_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_5 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_5_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_6 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_6_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_TDM_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quin_tdm_rx_7_mixer_controls,
+ ARRAY_SIZE(quin_tdm_rx_7_mixer_controls)),
+>>>>>>>
+ SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+<<<<<<<
+ SND_SOC_DAPM_MIXER("MultiMedia3 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul3_mixer_controls, ARRAY_SIZE(mmul3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia6 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia7 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul7_mixer_controls, ARRAY_SIZE(mmul7_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
+=======
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+>>>>>>>
+
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+<<<<<<<
+ Q6ROUTING_RX_DAPM_ROUTE("HDMI Mixer", "HDMI_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_0_RX Audio Mixer", "SLIMBUS_0_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_1_RX Audio Mixer", "SLIMBUS_1_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_2_RX Audio Mixer", "SLIMBUS_2_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_3_RX Audio Mixer", "SLIMBUS_3_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_4_RX Audio Mixer", "SLIMBUS_4_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_5_RX Audio Mixer", "SLIMBUS_5_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_6_RX Audio Mixer", "SLIMBUS_6_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_MI2S_RX Audio Mixer", "QUAT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_MI2S_RX Audio Mixer", "TERT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_MI2S_RX Audio Mixer", "SEC_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRI_MI2S_RX Audio Mixer", "PRI_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_0 Audio Mixer",
+ "PRIMARY_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_1 Audio Mixer",
+ "PRIMARY_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_2 Audio Mixer",
+ "PRIMARY_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_3 Audio Mixer",
+ "PRIMARY_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_4 Audio Mixer",
+ "PRIMARY_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_5 Audio Mixer",
+ "PRIMARY_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_6 Audio Mixer",
+ "PRIMARY_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("PRIMARY_TDM_RX_7 Audio Mixer",
+ "PRIMARY_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_0 Audio Mixer", "SEC_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_1 Audio Mixer", "SEC_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_2 Audio Mixer", "SEC_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_3 Audio Mixer", "SEC_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_4 Audio Mixer", "SEC_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_5 Audio Mixer", "SEC_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_6 Audio Mixer", "SEC_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("SEC_TDM_RX_7 Audio Mixer", "SEC_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_0 Audio Mixer", "TERT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_1 Audio Mixer", "TERT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_2 Audio Mixer", "TERT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_3 Audio Mixer", "TERT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_4 Audio Mixer", "TERT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_5 Audio Mixer", "TERT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_6 Audio Mixer", "TERT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("TERT_TDM_RX_7 Audio Mixer", "TERT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_0 Audio Mixer", "QUAT_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_1 Audio Mixer", "QUAT_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_2 Audio Mixer", "QUAT_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_3 Audio Mixer", "QUAT_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_4 Audio Mixer", "QUAT_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_5 Audio Mixer", "QUAT_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_6 Audio Mixer", "QUAT_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUAT_TDM_RX_7 Audio Mixer", "QUAT_TDM_RX_7"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_0 Audio Mixer", "QUIN_TDM_RX_0"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_1 Audio Mixer", "QUIN_TDM_RX_1"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_2 Audio Mixer", "QUIN_TDM_RX_2"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_3 Audio Mixer", "QUIN_TDM_RX_3"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_4 Audio Mixer", "QUIN_TDM_RX_4"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_5 Audio Mixer", "QUIN_TDM_RX_5"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_6 Audio Mixer", "QUIN_TDM_RX_6"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_TDM_RX_7 Audio Mixer", "QUIN_TDM_RX_7"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia1 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia2 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia3 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia4 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia5 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia6 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia7 Mixer"),
+ Q6ROUTING_TX_DAPM_ROUTE("MultiMedia8 Mixer"),
+
+ {"MM_UL1", NULL, "MultiMedia1 Mixer"},
+ {"MM_UL2", NULL, "MultiMedia2 Mixer"},
+ {"MM_UL3", NULL, "MultiMedia3 Mixer"},
+ {"MM_UL4", NULL, "MultiMedia4 Mixer"},
+ {"MM_UL5", NULL, "MultiMedia5 Mixer"},
+ {"MM_UL6", NULL, "MultiMedia6 Mixer"},
+ {"MM_UL7", NULL, "MultiMedia7 Mixer"},
+ {"MM_UL8", NULL, "MultiMedia8 Mixer"},
+=======
+ {"HDMI Mixer", "MultiMedia1", "MM_DL1"},
+ {"HDMI Mixer", "MultiMedia2", "MM_DL2"},
+ {"HDMI Mixer", "MultiMedia3", "MM_DL3"},
+ {"HDMI Mixer", "MultiMedia4", "MM_DL4"},
+ {"HDMI Mixer", "MultiMedia5", "MM_DL5"},
+ {"HDMI Mixer", "MultiMedia6", "MM_DL6"},
+ {"HDMI Mixer", "MultiMedia7", "MM_DL7"},
+ {"HDMI Mixer", "MultiMedia8", "MM_DL8"},
+ {"HDMI_RX", NULL, "HDMI Mixer"},
+
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
+
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_1_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Audio Mixer"},
+
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_2_RX", NULL, "SLIMBUS_2_RX Audio Mixer"},
+
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_3_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Audio Mixer"},
+
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"},
+
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_5_RX", NULL, "SLIMBUS_5_RX Audio Mixer"},
+
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
+
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"},
+
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Audio Mixer"},
+
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL5"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL7"},
+ {"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Audio Mixer"},
+
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Audio Mixer"},
+
+ {"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MM_UL1", NULL, "MultiMedia1 Mixer"},
+>>>>>>>
+};
+
+static int routing_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+<<<<<<<
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ unsigned int be_id = rtd->cpu_dai->id;
+=======
+ unsigned int be_id = rtd->cpu_dai->id;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct msm_routing_data *data = q6adm_get_routing_data(platform->dev);
+>>>>>>>
+ struct session_data *session;
+ int path_type;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ path_type = ADM_PATH_PLAYBACK;
+ else
+ path_type = ADM_PATH_LIVE_REC;
+
+ if (be_id >= AFE_MAX_PORTS)
+ return -EINVAL;
+
+ session = &data->port_data[be_id];
+
+ mutex_lock(&data->lock);
+
+ session->path_type = path_type;
+ session->sample_rate = params_rate(params);
+ session->channels = params_channels(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ session->bits_per_sample = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ session->bits_per_sample = 24;
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+static struct snd_pcm_ops q6pcm_routing_ops = {
+ .hw_params = routing_hw_params,
+};
+
+<<<<<<<
+static int msm_routing_probe(struct snd_soc_component *c)
+=======
+static int msm_routing_probe(struct snd_soc_platform *platform)
+>>>>>>>
+{
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++)
+ routing_data->sessions[i].port_id = -1;
+
+ return 0;
+}
+
+<<<<<<<
+static const struct snd_soc_component_driver msm_soc_routing_component = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+ .name = DRV_NAME,
+ .dapm_widgets = msm_qdsp6_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+};
+
+static int q6routing_dai_bind(struct device *dev, struct device *master,
+ void *data)
+=======
+static struct snd_soc_platform_driver msm_soc_routing_platform = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+ .component_driver = {
+ .name = "q6routing-component",
+ .dapm_widgets = msm_qdsp6_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
+};
+
+int q6pcm_routing_probe(struct device *dev)
+>>>>>>>
+{
+ routing_data = kzalloc(sizeof(*routing_data), GFP_KERNEL);
+ if (!routing_data)
+ return -ENOMEM;
+
+ routing_data->dev = dev;
+
+ mutex_init(&routing_data->lock);
+<<<<<<<
+ dev_set_drvdata(dev, routing_data);
+
+ return snd_soc_register_component(dev, &msm_soc_routing_component,
+ NULL, 0);
+}
+
+static void q6routing_dai_unbind(struct device *dev, struct device *master,
+ void *d)
+{
+ struct msm_routing_data *data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+
+ kfree(data);
+
+ routing_data = NULL;
+}
+
+static const struct component_ops q6routing_dai_comp_ops = {
+ .bind = q6routing_dai_bind,
+ .unbind = q6routing_dai_unbind,
+};
+
+static int q6pcm_routing_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6routing_dai_comp_ops);
+}
+
+static int q6pcm_routing_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6routing_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6pcm_routing_platform_driver = {
+ .driver = {
+ .name = "q6routing",
+ },
+ .probe = q6pcm_routing_probe,
+ .remove = q6pcm_routing_remove,
+};
+module_platform_driver(q6pcm_routing_platform_driver);
+
+=======
+ q6adm_set_routing_data(dev, routing_data);
+
+ return devm_snd_soc_register_platform(dev,
+ &msm_soc_routing_platform);
+}
+EXPORT_SYMBOL_GPL(q6pcm_routing_probe);
+
+int q6pcm_routing_remove(struct device *dev)
+{
+ kfree(routing_data);
+
+ routing_data = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6pcm_routing_remove);
+>>>>>>>
+MODULE_DESCRIPTION("Q6 Routing platform");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage
new file mode 100644
index 0000000..77f16aa
--- /dev/null
+++ b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage
@@ -0,0 +1,184 @@
+<<<<<<<
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. */
+>>>>>>>
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const struct parent_map *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_bypass2_ops;
+extern const struct clk_ops clk_rcg_pixel_ops;
+extern const struct clk_ops clk_rcg_esc_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS0 and NS1 register
+ * @md_reg: MD0 and MD1 register
+ * @bank_reg: register to XOR @mux_sel_bit into to switch glitch free mux
+ * @mn: mn counter (banked)
+ * @p: pre divider (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg[2];
+ u32 md_reg[2];
+ u32 bank_reg;
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @safe_src_index: safe src index value
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @current_freq: last cached frequency when using branches with shared RCGs
+ * @safe_src_freq_tbl: frequency table of safe source when using branches
+ * with shared RCGs
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ u8 safe_src_index;
+ const struct parent_map *parent_map;
+ const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
+ const struct freq_tbl *safe_src_freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_edp_pixel_ops;
+extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
+extern const struct clk_ops clk_pixel_ops;
+extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
+
+#endif
diff --git a/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.1 b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.1
new file mode 100644
index 0000000..77f16aa
--- /dev/null
+++ b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.1
@@ -0,0 +1,184 @@
+<<<<<<<
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. */
+>>>>>>>
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const struct parent_map *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_bypass2_ops;
+extern const struct clk_ops clk_rcg_pixel_ops;
+extern const struct clk_ops clk_rcg_esc_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS0 and NS1 register
+ * @md_reg: MD0 and MD1 register
+ * @bank_reg: register to XOR @mux_sel_bit into to switch glitch free mux
+ * @mn: mn counter (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg[2];
+ u32 md_reg[2];
+ u32 bank_reg;
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @safe_src_index: safe src index value
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @current_freq: last cached frequency when using branches with shared RCGs
+ * @safe_src_freq_tbl: frequency table of safe source when using branches
+ * with shared RCGs
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ u8 safe_src_index;
+ const struct parent_map *parent_map;
+ const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
+ const struct freq_tbl *safe_src_freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_edp_pixel_ops;
+extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
+extern const struct clk_ops clk_pixel_ops;
+extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
+
+#endif
diff --git a/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.2 b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.2
new file mode 100644
index 0000000..77f16aa
--- /dev/null
+++ b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.2
@@ -0,0 +1,184 @@
+<<<<<<<
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. */
+>>>>>>>
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const struct parent_map *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_bypass2_ops;
+extern const struct clk_ops clk_rcg_pixel_ops;
+extern const struct clk_ops clk_rcg_esc_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS0 and NS1 register
+ * @md_reg: MD0 and MD1 register
+ * @bank_reg: register to XOR @mux_sel_bit into to switch glitch free mux
+ * @mn: mn counter (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg[2];
+ u32 md_reg[2];
+ u32 bank_reg;
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @safe_src_index: safe src index value
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @current_freq: last cached frequency when using branches with shared RCGs
+ * @safe_src_freq_tbl: frequency table of safe source when using branches
+ * with shared RCGs
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ u8 safe_src_index;
+ const struct parent_map *parent_map;
+ const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
+ const struct freq_tbl *safe_src_freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_edp_pixel_ops;
+extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
+extern const struct clk_ops clk_pixel_ops;
+extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
+
+#endif
diff --git a/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.3 b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.3
new file mode 100644
index 0000000..77f16aa
--- /dev/null
+++ b/rr-cache/ceea2ce3f317f0e8444d0451756824b2d30128a7/preimage.3
@@ -0,0 +1,184 @@
+<<<<<<<
+/*
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+=======
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. */
+>>>>>>>
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const struct parent_map *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_bypass2_ops;
+extern const struct clk_ops clk_rcg_pixel_ops;
+extern const struct clk_ops clk_rcg_esc_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS0 and NS1 register
+ * @md_reg: MD0 and MD1 register
+ * @bank_reg: register to XOR @mux_sel_bit into to switch glitch free mux
+ * @mn: mn counter (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ * @lock: register lock
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg[2];
+ u32 md_reg[2];
+ u32 bank_reg;
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @safe_src_index: safe src index value
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @current_freq: last cached frequency when using branches with shared RCGs
+ * @safe_src_freq_tbl: frequency table of safe source when using branches
+ * with shared RCGs
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ u8 safe_src_index;
+ const struct parent_map *parent_map;
+ const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
+ const struct freq_tbl *safe_src_freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_edp_pixel_ops;
+extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
+extern const struct clk_ops clk_pixel_ops;
+extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
+
+#endif
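
For reference, the clk_rcg2 interface recorded above is normally consumed by a clock-controller driver that fills in a frequency table and parent map and hands the clock to the common framework through clkr.hw. The following is a minimal sketch only, assuming the usual clk_regmap/clk_init_data layout from clk-regmap.h and <linux/clk-provider.h>; the register offset, parent names, clock name and source indices are hypothetical, not taken from this tree.

#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include "clk-regmap.h"
#include "clk-rcg.h"

/* Hypothetical mapping: software source 0 selects hardware src_sel 0 (e.g. XO) */
static const struct parent_map example_parent_map[] = {
	{ 0, 0 },
};

static const char * const example_parents[] = { "xo" };

/* 19.2 MHz straight from source 0; pre_div holds the raw register encoding, no M/N */
static const struct freq_tbl ftbl_example_clk_src[] = {
	{ .freq = 19200000, .src = 0, .pre_div = 1, .m = 0, .n = 0 },
	{ }
};

static struct clk_rcg2 example_clk_src = {
	.cmd_rcgr = 0x1000,		/* hypothetical *_CMD_RCGR offset */
	.mnd_width = 0,			/* no M/N:D counter used */
	.hid_width = 5,
	.parent_map = example_parent_map,
	.freq_tbl = ftbl_example_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_clk_src",
		.parent_names = example_parents,
		.num_parents = ARRAY_SIZE(example_parents),
		.ops = &clk_rcg2_ops,
	},
};

The driver would then register example_clk_src.clkr with the qcom regmap clock helpers alongside its other clocks; clk_rcg2_shared_ops would be used instead of clk_rcg2_ops when the RCG is shared and needs the safe-source parking behaviour described by safe_src_index.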
diff --git a/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/postimage b/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/postimage
new file mode 100644
index 0000000..2f28819
--- /dev/null
+++ b/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/postimage
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/preimage b/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/preimage
new file mode 100644
index 0000000..948796d
--- /dev/null
+++ b/rr-cache/d09618b40c98717b5b0fc8128f9144c0fa1d2fde/preimage
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+<<<<<<<
+=======
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_NONE>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0xac0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ uart2: serial@a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_uart2_default>;
+ pinctrl-1 = <&qup_uart2_sleep>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_i2c10_default>;
+ pinctrl-1 = <&qup_i2c10_sleep>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@179e0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+>>>>>>>
+ };
+};
diff --git a/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/postimage b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/postimage
new file mode 100644
index 0000000..8a1e8e9
--- /dev/null
+++ b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/postimage
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+struct apr {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+ struct idr svcs_idr;
+ int dest_domain_id;
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to previously registered apr device.
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: Packet size on success, or a negative error code on failure.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+{
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->lock, flags);
+
+ hdr = &pkt->hdr;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc_id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc_id;
+
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+ dev_err(apr->dev, "APR: Wrong paket size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev.driver)
+ adrv = to_apr_driver(svc->dev.driver);
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+
+ if (!adrv) {
+ dev_err(apr->dev, "APR: service is not registered\n");
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+ * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can
+ * include optional headers into apr_hdr, which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(svc, &resp);
+
+ return 0;
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc_id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+
+ return adrv->probe(adev);
+}
+
+static int apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv;
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (dev->driver) {
+ adrv = to_apr_driver(dev->driver);
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc_id);
+ spin_unlock(&apr->svcs_lock);
+ }
+
+ return 0;
+}
+
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ spin_lock_init(&adev->lock);
+
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ if (np)
+ strncpy(adev->name, np->name, APR_NAME_SIZE);
+ else
+ strncpy(adev->name, id->name, APR_NAME_SIZE);
+
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ id->domain_id, id->svc_id);
+
+ adev->dev.bus = &aprbus;
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+ idr_alloc(&apr->svcs_idr, adev, id->svc_id,
+ id->svc_id + 1, GFP_ATOMIC);
+ spin_unlock(&apr->svcs_lock);
+
+ dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+ return ret;
+}
+
+static void of_register_apr_devices(struct device *dev)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+
+ for_each_child_of_node(dev->of_node, node) {
+ struct apr_device_id id = { {0} };
+
+ if (of_property_read_u32(node, "reg", &id.svc_id))
+ continue;
+
+ id.domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, &id))
+ dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct apr *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+ if (ret) {
+ dev_err(dev, "APR Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+ of_register_apr_devices(dev);
+
+ return 0;
+}
+
+static int apr_remove_device(struct device *dev, void *null)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ device_unregister(&adev->dev);
+
+ return 0;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with the client device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus.
+ * It is called from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &aprbus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id apr_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, apr_of_match);
+
+static struct rpmsg_driver apr_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = apr_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+ ret = bus_register(&aprbus);
+ if (!ret)
+ ret = register_rpmsg_driver(&apr_driver);
+ else
+ bus_unregister(&aprbus);
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+ bus_unregister(&aprbus);
+ unregister_rpmsg_driver(&apr_driver);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/preimage b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/preimage
new file mode 100644
index 0000000..1e7c6d3
--- /dev/null
+++ b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/preimage
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+<<<<<<<
+#include <linux/idr.h>
+=======
+#include <linux/list.h>
+>>>>>>>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+struct apr {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+<<<<<<<
+ struct idr svcs_idr;
+=======
+ struct list_head svcs;
+>>>>>>>
+ int dest_domain_id;
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to previously registered apr device.
+<<<<<<<
+ * @buf: Pointer to buffer to send
+ *
+ * Return: Packet size on success, or a negative error code on failure.
+ */
+int apr_send_pkt(struct apr_device *adev, void *buf)
+=======
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: Packet size on success, or a negative error code on failure.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+>>>>>>>
+{
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->lock, flags);
+
+<<<<<<<
+ hdr = &pkt->hdr;
+=======
+ hdr = (struct apr_hdr *)buf;
+>>>>>>>
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc_id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc_id;
+
+<<<<<<<
+ ret = rpmsg_send(apr->ch, buf, hdr->pkt_size);
+ if (ret) {
+ dev_err(&adev->dev, "Unable to send APR pkt %d\n",
+ hdr->pkt_size);
+ } else {
+ ret = hdr->pkt_size;
+ }
+
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret;
+=======
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+<<<<<<<
+ struct apr_client_message data;
+ struct apr_device *p, *c_svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t svc;
+=======
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+>>>>>>>
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+=======
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+>>>>>>>
+ dev_err(apr->dev, "APR: Wrong paket size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+<<<<<<<
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+=======
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+>>>>>>>
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ svc = hdr->dest_svc;
+ spin_lock(&apr->svcs_lock);
+ list_for_each_entry(p, &apr->svcs, node) {
+ if (svc == p->svc_id) {
+ c_svc = p;
+ if (c_svc->dev.driver)
+ adrv = to_apr_driver(c_svc->dev.driver);
+ break;
+ }
+ }
+ spin_unlock(&apr->svcs_lock);
+=======
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev.driver)
+ adrv = to_apr_driver(svc->dev.driver);
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+>>>>>>>
+
+ if (!adrv) {
+ dev_err(apr->dev, "APR: service is not registered\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+
+ if (data.payload_size > 0)
+ data.payload = buf + hdr_size;
+
+ adrv->callback(c_svc, &data);
+=======
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+ * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can
+ * include optional headers into apr_hdr, which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(svc, &resp);
+>>>>>>>
+
+ return 0;
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc_id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+
+ return adrv->probe(adev);
+}
+
+static int apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv;
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (dev->driver) {
+ adrv = to_apr_driver(dev->driver);
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+<<<<<<<
+ idr_remove(&apr->svcs_idr, adev->svc_id);
+=======
+ list_del(&adev->node);
+>>>>>>>
+ spin_unlock(&apr->svcs_lock);
+ }
+
+ return 0;
+}
+
+<<<<<<<
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ int ret;
+=======
+struct bus_type aprbus_type = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .remove = apr_device_remove,
+ .force_dma = true,
+};
+EXPORT_SYMBOL_GPL(aprbus_type);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id, bool is_svc)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+>>>>>>>
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ spin_lock_init(&adev->lock);
+
+<<<<<<<
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ if (np)
+ strncpy(adev->name, np->name, APR_NAME_SIZE);
+ else
+ strncpy(adev->name, id->name, APR_NAME_SIZE);
+
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ id->domain_id, id->svc_id);
+
+ adev->dev.bus = &aprbus;
+=======
+ if (is_svc) {
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", id->name,
+ id->domain_id, id->svc_id);
+ } else {
+ dev_set_name(&adev->dev, "%s:%s", dev_name(dev), np->name);
+ }
+
+ adev->dev.bus = &aprbus_type;
+>>>>>>>
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+<<<<<<<
+ idr_alloc(&apr->svcs_idr, adev, id->svc_id,
+ id->svc_id + 1, GFP_ATOMIC);
+=======
+ list_add_tail(&adev->node, &apr->svcs);
+>>>>>>>
+ spin_unlock(&apr->svcs_lock);
+
+ dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+
+<<<<<<<
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+ return ret;
+=======
+ return device_register(&adev->dev);
+>>>>>>>
+}
+
+static void of_register_apr_devices(struct device *dev)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+
+ for_each_child_of_node(dev->of_node, node) {
+<<<<<<<
+ struct apr_device_id id = { {0} };
+
+ if (of_property_read_u32(node, "reg", &id.svc_id))
+ continue;
+
+ id.domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, &id))
+ dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+=======
+ struct apr_device_id id = {0};
+ const char *svc_name;
+ bool is_svc = false;
+
+ if (of_find_property(node, "qcom,apr-svc-id", NULL) &&
+ of_find_property(node, "qcom,apr-svc-name", NULL)) {
+ /* svc node */
+ of_property_read_u32(node, "qcom,apr-svc-id",
+ &id.svc_id);
+ of_property_read_string(node, "qcom,apr-svc-name",
+ &svc_name);
+ id.domain_id = apr->dest_domain_id;
+
+ memcpy(id.name, svc_name, strlen(svc_name) + 1);
+ is_svc = true;
+ }
+
+ if (apr_add_device(dev, node, &id, is_svc))
+ dev_err(dev, "Failed to add arp %s svc\n", svc_name);
+>>>>>>>
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct apr *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+<<<<<<<
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-dest-domain-id",
+ &apr->dest_domain_id);
+=======
+ ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+>>>>>>>
+ if (ret) {
+ dev_err(dev, "APR Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+<<<<<<<
+ INIT_LIST_HEAD(&apr->svcs);
+
+=======
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+>>>>>>>
+ of_register_apr_devices(dev);
+
+ return 0;
+}
+
+static int apr_remove_device(struct device *dev, void *null)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ device_unregister(&adev->dev);
+
+ return 0;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with the client device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus.
+ * It is called from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+<<<<<<<
+ drv->driver.bus = &aprbus;
+=======
+ drv->driver.bus = &aprbus_type;
+>>>>>>>
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id apr_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ {}
+};
+<<<<<<<
+=======
+MODULE_DEVICE_TABLE(of, apr_of_match);
+>>>>>>>
+
+static struct rpmsg_driver apr_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = apr_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+<<<<<<<
+ ret = bus_register(&aprbus);
+ if (!ret)
+ ret = register_rpmsg_driver(&apr_driver);
+ else
+ bus_unregister(&aprbus);
+=======
+ ret = bus_register(&aprbus_type);
+ if (!ret)
+ ret = register_rpmsg_driver(&apr_driver);
+>>>>>>>
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+<<<<<<<
+ bus_unregister(&aprbus);
+=======
+ bus_unregister(&aprbus_type);
+>>>>>>>
+ unregister_rpmsg_driver(&apr_driver);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/thisimage b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/thisimage
new file mode 100644
index 0000000..1e7c6d3
--- /dev/null
+++ b/rr-cache/d1c7ff7b4ef48a1df740523a59e433002bf72915/thisimage
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+<<<<<<<
+#include <linux/idr.h>
+=======
+#include <linux/list.h>
+>>>>>>>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+struct apr {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+<<<<<<<
+ struct idr svcs_idr;
+=======
+ struct list_head svcs;
+>>>>>>>
+ int dest_domain_id;
+};
+
+/**
+ * apr_send_pkt() - Send an apr message from an apr device
+ *
+ * @adev: Pointer to previously registered apr device.
+<<<<<<<
+ * @buf: Pointer to buffer to send
+ *
+ * Return: Will be a negative error code on failure or the packet size on success.
+ */
+int apr_send_pkt(struct apr_device *adev, void *buf)
+=======
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: Will be a negative error code on failure or the packet size on success.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+>>>>>>>
+{
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->lock, flags);
+
+<<<<<<<
+ hdr = &pkt->hdr;
+=======
+ hdr = (struct apr_hdr *)buf;
+>>>>>>>
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc_id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc_id;
+
+<<<<<<<
+ ret = rpmsg_send(apr->ch, buf, hdr->pkt_size);
+ if (ret) {
+ dev_err(&adev->dev, "Unable to send APR pkt %d\n",
+ hdr->pkt_size);
+ } else {
+ ret = hdr->pkt_size;
+ }
+
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret;
+=======
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+<<<<<<<
+ struct apr_client_message data;
+ struct apr_device *p, *c_svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t svc;
+=======
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+>>>>>>>
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+=======
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+>>>>>>>
+		dev_err(apr->dev, "APR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+<<<<<<<
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+=======
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+>>>>>>>
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ svc = hdr->dest_svc;
+ spin_lock(&apr->svcs_lock);
+ list_for_each_entry(p, &apr->svcs, node) {
+ if (svc == p->svc_id) {
+ c_svc = p;
+ if (c_svc->dev.driver)
+ adrv = to_apr_driver(c_svc->dev.driver);
+ break;
+ }
+ }
+ spin_unlock(&apr->svcs_lock);
+=======
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev.driver)
+ adrv = to_apr_driver(svc->dev.driver);
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+>>>>>>>
+
+ if (!adrv) {
+ dev_err(apr->dev, "APR: service is not registered\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+
+ if (data.payload_size > 0)
+ data.payload = buf + hdr_size;
+
+ adrv->callback(c_svc, &data);
+=======
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+	 * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can
+	 * include optional headers in apr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(svc, &resp);
+>>>>>>>
+
+ return 0;
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc_id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+
+ return adrv->probe(adev);
+}
+
+static int apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv;
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (dev->driver) {
+ adrv = to_apr_driver(dev->driver);
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+<<<<<<<
+ idr_remove(&apr->svcs_idr, adev->svc_id);
+=======
+ list_del(&adev->node);
+>>>>>>>
+ spin_unlock(&apr->svcs_lock);
+ }
+
+ return 0;
+}
+
+<<<<<<<
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ int ret;
+=======
+struct bus_type aprbus_type = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .remove = apr_device_remove,
+ .force_dma = true,
+};
+EXPORT_SYMBOL_GPL(aprbus_type);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id, bool is_svc)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+>>>>>>>
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ spin_lock_init(&adev->lock);
+
+<<<<<<<
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ if (np)
+ strncpy(adev->name, np->name, APR_NAME_SIZE);
+ else
+ strncpy(adev->name, id->name, APR_NAME_SIZE);
+
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ id->domain_id, id->svc_id);
+
+ adev->dev.bus = &aprbus;
+=======
+ if (is_svc) {
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", id->name,
+ id->domain_id, id->svc_id);
+ } else {
+ dev_set_name(&adev->dev, "%s:%s", dev_name(dev), np->name);
+ }
+
+ adev->dev.bus = &aprbus_type;
+>>>>>>>
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+<<<<<<<
+ idr_alloc(&apr->svcs_idr, adev, id->svc_id,
+ id->svc_id + 1, GFP_ATOMIC);
+=======
+ list_add_tail(&adev->node, &apr->svcs);
+>>>>>>>
+ spin_unlock(&apr->svcs_lock);
+
+ dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+
+<<<<<<<
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+ return ret;
+=======
+ return device_register(&adev->dev);
+>>>>>>>
+}
+
+static void of_register_apr_devices(struct device *dev)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+
+ for_each_child_of_node(dev->of_node, node) {
+<<<<<<<
+ struct apr_device_id id = { {0} };
+
+ if (of_property_read_u32(node, "reg", &id.svc_id))
+ continue;
+
+ id.domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, &id))
+ dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+=======
+ struct apr_device_id id = {0};
+ const char *svc_name;
+ bool is_svc = false;
+
+ if (of_find_property(node, "qcom,apr-svc-id", NULL) &&
+ of_find_property(node, "qcom,apr-svc-name", NULL)) {
+ /* svc node */
+ of_property_read_u32(node, "qcom,apr-svc-id",
+ &id.svc_id);
+ of_property_read_string(node, "qcom,apr-svc-name",
+ &svc_name);
+ id.domain_id = apr->dest_domain_id;
+
+ memcpy(id.name, svc_name, strlen(svc_name) + 1);
+ is_svc = true;
+ }
+
+ if (apr_add_device(dev, node, &id, is_svc))
+			dev_err(dev, "Failed to add apr %s svc\n", svc_name);
+>>>>>>>
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct apr *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+<<<<<<<
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-dest-domain-id",
+ &apr->dest_domain_id);
+=======
+ ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+>>>>>>>
+ if (ret) {
+ dev_err(dev, "APR Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+<<<<<<<
+ INIT_LIST_HEAD(&apr->svcs);
+
+=======
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+>>>>>>>
+ of_register_apr_devices(dev);
+
+ return 0;
+}
+
+static int apr_remove_device(struct device *dev, void *null)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ device_unregister(&adev->dev);
+
+ return 0;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus.
+ * It is called from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+<<<<<<<
+ drv->driver.bus = &aprbus;
+=======
+ drv->driver.bus = &aprbus_type;
+>>>>>>>
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id apr_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ {}
+};
+<<<<<<<
+=======
+MODULE_DEVICE_TABLE(of, apr_of_match);
+>>>>>>>
+
+static struct rpmsg_driver apr_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = apr_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+<<<<<<<
+ ret = bus_register(&aprbus);
+ if (!ret)
+ ret = register_rpmsg_driver(&apr_driver);
+ else
+ bus_unregister(&aprbus);
+=======
+ ret = bus_register(&aprbus_type);
+ if (!ret)
+ ret = register_rpmsg_driver(&apr_driver);
+>>>>>>>
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+<<<<<<<
+ bus_unregister(&aprbus);
+=======
+ bus_unregister(&aprbus_type);
+>>>>>>>
+ unregister_rpmsg_driver(&apr_driver);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/rr-cache/d518adaa86824df51e502dbc536338e404075a83/thisimage b/rr-cache/d518adaa86824df51e502dbc536338e404075a83/thisimage
index d2ae564..39a7cda 100644
--- a/rr-cache/d518adaa86824df51e502dbc536338e404075a83/thisimage
+++ b/rr-cache/d518adaa86824df51e502dbc536338e404075a83/thisimage
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
diff --git a/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/postimage b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/postimage
new file mode 100644
index 0000000..cb709e5
--- /dev/null
+++ b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/postimage
@@ -0,0 +1,33 @@
+Qualcomm Audio Device Manager (Q6ADM) binding
+
+Q6ADM is one of the APR audio services on Q6DSP.
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+- but must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6adm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ Or "qcom,q6adm" where the version number can be queried
+ from DSP.
+ example "qcom,q6adm-v2.0"
+
+
+= ADM routing
+"routing" subnode of the ADM node represents adm routing specific configuration
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+= EXAMPLE
+q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ q6routing: routing {
+ #sound-dai-cells = <0>;
+ };
+};
diff --git a/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/preimage b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/preimage
new file mode 100644
index 0000000..c072a1f
--- /dev/null
+++ b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/preimage
@@ -0,0 +1,55 @@
+Qualcomm Audio Device Manager (Q6ADM) binding
+
+Q6ADM is one of the APR audio services on Q6DSP.
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+- but must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,adm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,adm-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 8 for Audio Device Manager Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "ADM"
+
+= EXAMPLE
+
+q6adm: q6adm {
+ compatible = "qcom,q6adm-v2.0";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+=======
+ Definition: must be "qcom,q6adm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ Or "qcom,q6adm" where the version number can be queried
+ from DSP.
+ example "qcom,q6adm-v2.0"
+
+
+= ADM routing
+"routing" subnode of the ADM node represents adm routing specific configuration
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+= EXAMPLE
+q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ q6routing: routing {
+ #sound-dai-cells = <0>;
+ };
+>>>>>>>
+};
diff --git a/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/thisimage b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/thisimage
new file mode 100644
index 0000000..c072a1f
--- /dev/null
+++ b/rr-cache/d88ff7d9a0549d818568ec1a71b807a10ec5ff01/thisimage
@@ -0,0 +1,55 @@
+Qualcomm Audio Device Manager (Q6ADM) binding
+
+Q6ADM is one of the APR audio services on Q6DSP.
+Please refer to qcom,apr.txt for details of the common apr service bindings
+used by the apr service device.
+
+- but must contain the following property:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+<<<<<<<
+ Definition: must be "qcom,adm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ example "qcom,adm-v2.0"
+
+- qcom,apr-svc-id
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must be 8 for Audio Device Manager Service.
+
+- qcom,apr-svc-name
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must be "ADM"
+
+= EXAMPLE
+
+q6adm: q6adm {
+ compatible = "qcom,q6adm-v2.0";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+=======
+ Definition: must be "qcom,q6adm-v<MAJOR-NUMBER>.<MINOR-NUMBER>".
+ Or "qcom,q6adm" where the version number can be queried
+ from DSP.
+ example "qcom,q6adm-v2.0"
+
+
+= ADM routing
+"routing" subnode of the ADM node represents adm routing specific configuration
+
+- #sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+= EXAMPLE
+q6adm@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ q6routing: routing {
+ #sound-dai-cells = <0>;
+ };
+>>>>>>>
+};
diff --git a/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/postimage b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/postimage
new file mode 100644
index 0000000..6e8a9ab
--- /dev/null
+++ b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/postimage
@@ -0,0 +1,66 @@
+Binding for the Qualcomm APCS global block
+==========================================
+
+This binding describes the APCS "global" block found in various Qualcomm
+platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,msm8916-apcs-kpss-global",
+ "qcom,msm8996-apcs-hmss-global"
+ "qcom,msm8998-apcs-hmss-global"
+ "qcom,sdm845-apss-shared"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the global block
+- clocks:
+	Usage: required if #clock-cells property is present
+ Value type: <phandle>
+ Definition: phandle to the input PLL, which feeds the APCS mux/divider
+
+- #mbox-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: as described in mailbox.txt, must be 1
+
+- #clock-cells:
+ Usage: optional
+ Value type: <u32>
+ Definition: as described in clock.txt, must be 0
+
+
+= EXAMPLE
+The following example describes the APCS HMSS found in MSM8996 and part of the
+GLINK RPM referencing the "rpm_hlos" doorbell therein.
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+ mbox-names = "rpm_hlos";
+ };
+
+Below is another example of the APCS binding on MSM8916 platforms:
+
+ apcs: mailbox@b011000 {
+ compatible = "qcom,msm8916-apcs-kpss-global";
+ reg = <0xb011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&a53pll>;
+ #clock-cells = <0>;
+ };
diff --git a/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/preimage b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/preimage
new file mode 100644
index 0000000..81aae91
--- /dev/null
+++ b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/preimage
@@ -0,0 +1,69 @@
+Binding for the Qualcomm APCS global block
+==========================================
+
+This binding describes the APCS "global" block found in various Qualcomm
+platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,msm8916-apcs-kpss-global",
+ "qcom,msm8996-apcs-hmss-global"
+<<<<<<<
+=======
+ "qcom,msm8998-apcs-hmss-global"
+>>>>>>>
+ "qcom,sdm845-apss-shared"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the global block
+- clocks:
+	Usage: required if #clock-cells property is present
+ Value type: <phandle>
+ Definition: phandle to the input PLL, which feeds the APCS mux/divider
+
+- #mbox-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: as described in mailbox.txt, must be 1
+
+- #clock-cells:
+ Usage: optional
+ Value type: <u32>
+ Definition: as described in clock.txt, must be 0
+
+
+= EXAMPLE
+The following example describes the APCS HMSS found in MSM8996 and part of the
+GLINK RPM referencing the "rpm_hlos" doorbell therein.
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+ mbox-names = "rpm_hlos";
+ };
+
+Below is another example of the APCS binding on MSM8916 platforms:
+
+ apcs: mailbox@b011000 {
+ compatible = "qcom,msm8916-apcs-kpss-global";
+ reg = <0xb011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&a53pll>;
+ #clock-cells = <0>;
+ };
diff --git a/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/thisimage b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/thisimage
new file mode 100644
index 0000000..81aae91
--- /dev/null
+++ b/rr-cache/d933caf45b7472f2f16f0f80f807051a1eefe987/thisimage
@@ -0,0 +1,69 @@
+Binding for the Qualcomm APCS global block
+==========================================
+
+This binding describes the APCS "global" block found in various Qualcomm
+platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,msm8916-apcs-kpss-global",
+ "qcom,msm8996-apcs-hmss-global"
+<<<<<<<
+=======
+ "qcom,msm8998-apcs-hmss-global"
+>>>>>>>
+ "qcom,sdm845-apss-shared"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the global block
+- clocks:
+	Usage: required if #clock-cells property is present
+ Value type: <phandle>
+ Definition: phandle to the input PLL, which feeds the APCS mux/divider
+
+- #mbox-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: as described in mailbox.txt, must be 1
+
+- #clock-cells:
+ Usage: optional
+ Value type: <u32>
+ Definition: as described in clock.txt, must be 0
+
+
+= EXAMPLE
+The following example describes the APCS HMSS found in MSM8996 and part of the
+GLINK RPM referencing the "rpm_hlos" doorbell therein.
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+ mbox-names = "rpm_hlos";
+ };
+
+Below is another example of the APCS binding on MSM8916 platforms:
+
+ apcs: mailbox@b011000 {
+ compatible = "qcom,msm8916-apcs-kpss-global";
+ reg = <0xb011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&a53pll>;
+ #clock-cells = <0>;
+ };
diff --git a/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/postimage b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/postimage
new file mode 100644
index 0000000..9983c66
--- /dev/null
+++ b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/postimage
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/platform_device.h>
+#include <sound/asound.h>
+#include "q6adm.h"
+#include "q6afe.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
+#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
+#define ADM_CMD_DEVICE_CLOSE_V5 0x00010327
+#define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325
+
+#define TIMEOUT_MS 1000
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+/* Definition for a legacy device session. */
+#define ADM_LEGACY_DEVICE_SESSION 0
+#define ADM_MATRIX_ID_AUDIO_RX 0
+#define ADM_MATRIX_ID_AUDIO_TX 1
+
+struct q6copp {
+ int afe_port;
+ int copp_idx;
+ int id;
+ int topology;
+ int mode;
+ int rate;
+ int bit_width;
+ int channels;
+ int app_type;
+ int acdb_id;
+
+ struct aprv2_ibasic_rsp_result_t result;
+ struct kref refcount;
+ wait_queue_head_t wait;
+ struct list_head node;
+ struct q6adm *adm;
+};
+
+struct q6adm {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ unsigned long copp_bitmap[AFE_MAX_PORTS];
+ struct list_head copps_list;
+ spinlock_t copps_list_lock;
+ struct aprv2_ibasic_rsp_result_t result;
+ struct mutex lock;
+ wait_queue_head_t matrix_map_wait;
+ struct platform_device *pdev_routing;
+};
+
+struct q6adm_cmd_device_open_v5 {
+ u16 flags;
+ u16 mode_of_operation;
+ u16 endpoint_id_1;
+ u16 endpoint_id_2;
+ u32 topology_id;
+ u16 dev_num_channel;
+ u16 bit_width;
+ u32 sample_rate;
+ u8 dev_channel_mapping[8];
+} __packed;
+
+struct q6adm_cmd_matrix_map_routings_v5 {
+ u32 matrix_id;
+ u32 num_sessions;
+} __packed;
+
+struct q6adm_session_map_node_v5 {
+ u16 session_id;
+ u16 num_copps;
+} __packed;
+
+static struct q6copp *q6adm_find_copp(struct q6adm *adm, int port_idx,
+ int copp_idx)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) {
+ ret = c;
+ kref_get(&c->refcount);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+
+}
+
+static void q6adm_free_copp(struct kref *ref)
+{
+ struct q6copp *c = container_of(ref, struct q6copp, refcount);
+ struct q6adm *adm = c->adm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]);
+ list_del(&c->node);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ kfree(c);
+}
+
+static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result = data->payload;
+ int port_idx, copp_idx;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6copp *copp;
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (!data->payload_size)
+ return 0;
+
+ copp_idx = (hdr->token) & 0XFF;
+ port_idx = ((hdr->token) >> 16) & 0xFF;
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ dev_err(&adev->dev, "Invalid port idx %d token %d\n",
+ port_idx, hdr->token);
+ return 0;
+ }
+ if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ dev_err(&adev->dev, "Invalid copp idx %d token %d\n",
+ copp_idx, hdr->token);
+ return 0;
+ }
+
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT: {
+ if (result->status != 0) {
+ dev_err(&adev->dev, "cmd = 0x%x return error = 0x%x\n",
+ result->opcode, result->status);
+ }
+ switch (result->opcode) {
+ case ADM_CMD_DEVICE_OPEN_V5:
+ case ADM_CMD_DEVICE_CLOSE_V5:
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+ return 0;
+
+ copp->result = *result;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+ adm->result = *result;
+ wake_up(&adm->matrix_map_wait);
+ break;
+
+ default:
+ dev_err(&adev->dev, "Unknown Cmd: 0x%x\n",
+ result->opcode);
+ break;
+ }
+ return 0;
+ }
+ case ADM_CMDRSP_DEVICE_OPEN_V5: {
+ struct adm_cmd_rsp_device_open_v5 {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+ } __packed * open = data->payload;
+
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+ return 0;
+
+ if (open->copp_id == INVALID_COPP_ID) {
+ dev_err(&adev->dev, "Invalid coppid rxed %d\n",
+ open->copp_id);
+ copp->result.status = ADSP_EBADPARAM;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ }
+ copp->result.opcode = hdr->opcode;
+ copp->id = open->copp_id;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ }
+ break;
+ default:
+ dev_err(&adev->dev, "Unknown cmd:0x%x\n",
+ hdr->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx)
+{
+ struct q6copp *c;
+ int idx;
+
+ idx = find_first_zero_bit(&adm->copp_bitmap[port_idx],
+ MAX_COPPS_PER_PORT);
+
+ if (idx > MAX_COPPS_PER_PORT)
+ return ERR_PTR(-EBUSY);
+
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(idx, &adm->copp_bitmap[port_idx]);
+ c->copp_idx = idx;
+ c->afe_port = port_idx;
+ c->adm = adm;
+
+ init_waitqueue_head(&c->wait);
+
+ return c;
+}
+
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp,
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct device *dev = adm->dev;
+ uint32_t opcode = pkt->hdr.opcode;
+ int ret;
+
+ mutex_lock(&adm->lock);
+ copp->result.opcode = 0;
+ copp->result.status = 0;
+ ret = apr_send_pkt(adm->apr, pkt);
+ if (ret < 0) {
+ dev_err(dev, "Failed to send APR packet\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Wait for the callback with copp id */
+ if (rsp_opcode)
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode) ||
+ (copp->result.opcode == rsp_opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ else
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+		dev_err(dev, "ADM copp cmd timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (copp->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ copp->result.status);
+ ret = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&adm->lock);
+ return ret;
+}
+
+static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_pkt close;
+
+ close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.hdr.pkt_size = sizeof(close);
+ close.hdr.src_port = port_id;
+ close.hdr.dest_port = copp->id;
+ close.hdr.token = port_id << 16 | copp_idx;
+ close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close, 0);
+}
+
+static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm,
+ int port_id, int topology,
+ int mode, int rate,
+ int channel_mode, int bit_width,
+ int app_type)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_id == c->afe_port) && (topology == c->topology) &&
+ (mode == c->mode) && (rate == c->rate) &&
+ (bit_width == c->bit_width) && (app_type == c->app_type)) {
+ ret = c;
+ kref_get(&c->refcount);
+ }
+ }
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+}
+
+static int q6adm_device_open(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int path, int topology,
+ int channel_mode, int bit_width, int rate)
+{
+ struct q6adm_cmd_device_open_v5 *open;
+ int afe_port = q6afe_get_port_id(port_id);
+ struct apr_pkt *pkt;
+ void *p;
+ int ret, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = afe_port;
+ pkt->hdr.dest_port = afe_port;
+ pkt->hdr.token = port_id << 16 | copp->copp_idx;
+ pkt->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+ open->flags = ADM_LEGACY_DEVICE_SESSION;
+ open->mode_of_operation = path;
+ open->endpoint_id_1 = afe_port;
+ open->topology_id = topology;
+ open->dev_num_channel = channel_mode & 0x00FF;
+ open->bit_width = bit_width;
+ open->sample_rate = rate;
+
+ ret = q6dsp_map_channels(&open->dev_channel_mapping[0],
+ channel_mode);
+ if (ret)
+ goto err;
+
+ ret = q6adm_apr_send_copp_pkt(adm, copp, pkt,
+ ADM_CMDRSP_DEVICE_OPEN_V5);
+
+err:
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * q6adm_open() - open adm and grab a free copp
+ *
+ * @dev: Pointer to adm child device.
+ * @port_id: port id
+ * @path: playback or capture path.
+ * @rate: rate at which copp is required.
+ * @channel_mode: channel mode
+ * @topology: adm topology id
+ * @perf_mode: performance mode.
+ * @bit_width: audio sample bit width
+ * @app_type: Application type.
+ * @acdb_id: ACDB id
+ *
+ * Return: Will be an error pointer on failure or a valid copp pointer on success.
+ */
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6copp *copp;
+ unsigned long flags;
+ int ret = 0;
+
+ if (port_id < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ copp = q6adm_find_matching_copp(adm, port_id, topology, perf_mode,
+ rate, channel_mode, bit_width, app_type);
+ if (copp) {
+ dev_err(dev, "Found Matching Copp 0x%x\n", copp->copp_idx);
+ return copp;
+ }
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ copp = q6adm_alloc_copp(adm, port_id);
+ if (IS_ERR_OR_NULL(copp)) {
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ return ERR_CAST(copp);
+ }
+
+ list_add_tail(&copp->node, &adm->copps_list);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ kref_init(&copp->refcount);
+ copp->topology = topology;
+ copp->mode = perf_mode;
+ copp->rate = rate;
+ copp->channels = channel_mode;
+ copp->bit_width = bit_width;
+ copp->app_type = app_type;
+
+
+ ret = q6adm_device_open(adm, copp, port_id, path, topology,
+ channel_mode, bit_width, rate);
+ if (ret < 0) {
+ kref_put(&copp->refcount, q6adm_free_copp);
+ return ERR_PTR(ret);
+ }
+
+ return copp;
+}
+EXPORT_SYMBOL_GPL(q6adm_open);
+
+/**
+ * q6adm_get_copp_id() - get copp index
+ *
+ * @copp: Pointer to valid copp
+ *
+ * Return: Will be negative on error or a valid copp index on success.
+ **/
+int q6adm_get_copp_id(struct q6copp *copp)
+{
+ if (!copp)
+ return -EINVAL;
+
+ return copp->copp_idx;
+}
+EXPORT_SYMBOL_GPL(q6adm_get_copp_id);
+
+/**
+ * q6adm_matrix_map() - Map asm streams and afe ports using payload
+ *
+ * @dev: Pointer to adm child device.
+ * @path: playback or capture path.
+ * @payload_map: map between session id and afe ports.
+ * @perf_mode: Performance mode.
+ *
+ * Return: Will be negative on error or zero on success.
+ */
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6adm_cmd_matrix_map_routings_v5 *route;
+ struct q6adm_session_map_node_v5 *node;
+ struct apr_pkt *pkt;
+ uint16_t *copps_list;
+ int pkt_size, ret, i, copp_idx;
+ void *matrix_map = NULL;
+ struct q6copp *copp;
+
+ /* Assumes port_ids have already been validated during adm_open */
+ pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) +
+ (sizeof(uint32_t) * payload_map.num_copps));
+
+ matrix_map = kzalloc(pkt_size, GFP_KERNEL);
+ if (!matrix_map)
+ return -ENOMEM;
+
+ pkt = matrix_map;
+ route = matrix_map + APR_HDR_SIZE;
+ node = matrix_map + APR_HDR_SIZE + sizeof(*route);
+ copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = 0;
+ pkt->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+ route->num_sessions = 1;
+
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+ break;
+ default:
+ dev_err(dev, "Wrong path set[%d]\n", path);
+ break;
+ }
+
+ node->session_id = payload_map.session_id;
+ node->num_copps = payload_map.num_copps;
+
+ for (i = 0; i < payload_map.num_copps; i++) {
+ int port_idx = payload_map.port_id[i];
+
+ if (port_idx < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n",
+ payload_map.port_id[i]);
+ kfree(pkt);
+ return -EINVAL;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp) {
+ kfree(pkt);
+ return -EINVAL;
+ }
+
+ copps_list[i] = copp->id;
+ kref_put(&copp->refcount, q6adm_free_copp);
+ }
+
+ mutex_lock(&adm->lock);
+ adm->result.status = 0;
+ adm->result.opcode = 0;
+
+ ret = apr_send_pkt(adm->apr, pkt);
+ if (ret < 0) {
+ dev_err(dev, "routing for stream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->result.opcode == pkt->hdr.opcode,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for stream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ adm->result.status);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6adm_matrix_map);
+
+/**
+ * q6adm_close() - Close adm copp
+ *
+ * @dev: Pointer to adm child device.
+ * @copp: pointer to previously opened copp
+ *
+ * Return: Will be negative on error or zero on success.
+ */
+int q6adm_close(struct device *dev, struct q6copp *copp)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ int ret = 0;
+
+ ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx);
+ if (ret < 0) {
+ dev_err(adm->dev, "Failed to close copp %d\n", ret);
+ return ret;
+ }
+
+ kref_put(&copp->refcount, q6adm_free_copp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6adm_close);
+
+static int q6adm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+ struct q6adm *adm;
+
+ adm = devm_kzalloc(&adev->dev, sizeof(*adm), GFP_KERNEL);
+ if (!adm)
+ return -ENOMEM;
+
+ adm->apr = adev;
+ dev_set_drvdata(&adev->dev, adm);
+ adm->dev = dev;
+ q6core_get_svc_api_info(adev->svc_id, &adm->ainfo);
+ mutex_init(&adm->lock);
+ init_waitqueue_head(&adm->matrix_map_wait);
+
+ INIT_LIST_HEAD(&adm->copps_list);
+ spin_lock_init(&adm->copps_list_lock);
+
+ dais_np = of_get_child_by_name(dev->of_node, "routing");
+ if (dais_np) {
+ adm->pdev_routing = of_platform_device_create(dais_np,
+ "q6routing", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6adm_remove(struct apr_device *adev)
+{
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (adm->pdev_routing)
+ of_platform_device_destroy(&adm->pdev_routing->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id q6adm_device_id[] = {
+ { .compatible = "qcom,q6adm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6adm_device_id);
+
+static struct apr_driver qcom_q6adm_driver = {
+ .probe = q6adm_probe,
+ .remove = q6adm_remove,
+ .callback = q6adm_callback,
+ .driver = {
+ .name = "qcom-q6adm",
+ .of_match_table = of_match_ptr(q6adm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6adm_driver);
+MODULE_DESCRIPTION("Q6 Audio Device Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/preimage b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/preimage
new file mode 100644
index 0000000..e946d28
--- /dev/null
+++ b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/preimage
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+<<<<<<<
+=======
+#include <linux/of_platform.h>
+#include <linux/kref.h>
+>>>>>>>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/platform_device.h>
+#include <sound/asound.h>
+#include "q6adm.h"
+#include "q6afe.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
+#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
+#define ADM_CMD_DEVICE_CLOSE_V5 0x00010327
+#define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325
+
+#define TIMEOUT_MS 1000
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+/* Definition for a legacy device session. */
+#define ADM_LEGACY_DEVICE_SESSION 0
+#define ADM_MATRIX_ID_AUDIO_RX 0
+#define ADM_MATRIX_ID_AUDIO_TX 1
+<<<<<<<
+
+struct q6copp {
+ int afe_port;
+ int copp_idx;
+ int id;
+ int topology;
+ int mode;
+=======
+struct copp {
+ int afe_port;
+ int copp_idx;
+ int id;
+ int refcnt;
+ int topology;
+ int mode;
+ int stat;
+>>>>>>>
+ int rate;
+ int bit_width;
+ int channels;
+ int app_type;
+ int acdb_id;
+<<<<<<<
+ struct mutex lock;
+=======
+
+ struct aprv2_ibasic_rsp_result_t result;
+ struct kref refcount;
+>>>>>>>
+ wait_queue_head_t wait;
+ struct list_head node;
+ struct q6adm *adm;
+};
+
+<<<<<<<
+struct q6adm {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ unsigned long copp_bitmap[AFE_MAX_PORTS];
+ struct list_head copps_list;
+ spinlock_t copps_list_lock;
+ struct aprv2_ibasic_rsp_result_t result;
+ struct mutex lock;
+ wait_queue_head_t matrix_map_wait;
+ struct platform_device *pdev_routing;
+};
+
+struct q6adm_cmd_device_open_v5 {
+=======
+struct q6adm_api_data {
+ int (*open)(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate);
+ int (*matrix_map)(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode);
+ int (*close)(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx);
+
+};
+
+struct q6adm {
+ struct apr_device *apr;
+ struct device *dev;
+ unsigned long copp_bitmap[AFE_MAX_PORTS];
+ struct list_head copps_list;
+ spinlock_t copps_list_lock;
+ int matrix_map_stat;
+ struct mutex lock;
+ wait_queue_head_t matrix_map_wait;
+ void *routing_data;
+ struct q6adm_api_data *ops;
+};
+
+struct adm_cmd_device_open_v5 {
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 flags;
+ u16 mode_of_operation;
+ u16 endpoint_id_1;
+ u16 endpoint_id_2;
+ u32 topology_id;
+ u16 dev_num_channel;
+ u16 bit_width;
+ u32 sample_rate;
+ u8 dev_channel_mapping[8];
+} __packed;
+
+<<<<<<<
+struct adm_cmd_matrix_map_routings_v5 {
+ struct apr_hdr hdr;
+=======
+struct q6adm_cmd_matrix_map_routings_v5 {
+>>>>>>>
+ u32 matrix_id;
+ u32 num_sessions;
+} __packed;
+
+<<<<<<<
+struct adm_session_map_node_v5 {
+=======
+struct q6adm_session_map_node_v5 {
+>>>>>>>
+ u16 session_id;
+ u16 num_copps;
+} __packed;
+
+<<<<<<<
+/*** V1 ***/
+#define AUDIO_RX 0x0
+#define AUDIO_TX 0x1
+
+#define ADM_MAX_COPPS 5
+#define ADM_CMD_COPP_OPEN 0x00010304
+struct adm_copp_open_command_v1 {
+ struct apr_hdr hdr;
+ u16 flags;
+ u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+ u16 endpoint_id1;
+ u16 endpoint_id2;
+ u32 topology_id;
+ u16 channel_config;
+ u16 reserved;
+ u32 rate;
+} __attribute__ ((packed));
+
+#define ADM_CMD_COPP_CLOSE 0x00010305
+
+
+#define ADM_CMDRSP_COPP_OPEN 0x0001030A
+struct adm_copp_open_respond {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS 0x00010301
+struct adm_routings_session {
+ u16 id;
+ u16 num_copps;
+ u16 copp_id[ADM_MAX_COPPS+1]; /*Padding if numCopps is odd */
+} __packed;
+
+struct adm_routings_command {
+ struct apr_hdr hdr;
+ u32 path; /* 0 = Rx, 1 Tx */
+ u32 num_sessions;
+ struct adm_routings_session session[8];
+} __attribute__ ((packed));
+
+/*********/
+static struct copp *adm_find_copp(struct q6adm *adm, int port_idx,
+ int copp_idx)
+{
+ struct copp *c;
+
+ spin_lock(&adm->copps_list_lock);
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) {
+ spin_unlock(&adm->copps_list_lock);
+ return c;
+ }
+ }
+
+ spin_unlock(&adm->copps_list_lock);
+ return NULL;
+
+}
+
+static int adm_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result = data->payload;
+ int port_idx, copp_idx;
+ struct copp *copp;
+=======
+static struct q6copp *q6adm_find_copp(struct q6adm *adm, int port_idx,
+ int copp_idx)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) {
+ ret = c;
+ kref_get(&c->refcount);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+
+}
+
+static void q6adm_free_copp(struct kref *ref)
+{
+ struct q6copp *c = container_of(ref, struct q6copp, refcount);
+ struct q6adm *adm = c->adm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]);
+ list_del(&c->node);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ kfree(c);
+}
+
+static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result = data->payload;
+ int port_idx, copp_idx;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6copp *copp;
+>>>>>>>
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (!data->payload_size)
+ return 0;
+
+<<<<<<<
+ copp_idx = (data->token) & 0XFF;
+ port_idx = ((data->token) >> 16) & 0xFF;
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ dev_err(&adev->dev, "Invalid port idx %d token %d\n",
+ port_idx, data->token);
+=======
+ copp_idx = (hdr->token) & 0XFF;
+ port_idx = ((hdr->token) >> 16) & 0xFF;
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ dev_err(&adev->dev, "Invalid port idx %d token %d\n",
+ port_idx, hdr->token);
+>>>>>>>
+ return 0;
+ }
+ if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ dev_err(&adev->dev, "Invalid copp idx %d token %d\n",
+<<<<<<<
+ copp_idx, data->token);
+ return 0;
+ }
+
+ switch (data->opcode) {
+=======
+ copp_idx, hdr->token);
+ return 0;
+ }
+
+ switch (hdr->opcode) {
+>>>>>>>
+ case APR_BASIC_RSP_RESULT: {
+ if (result->status != 0) {
+ dev_err(&adev->dev, "cmd = 0x%x return error = 0x%x\n",
+ result->opcode, result->status);
+ }
+ switch (result->opcode) {
+<<<<<<<
+ case ADM_CMD_COPP_OPEN:
+ case ADM_CMD_COPP_CLOSE:
+ case ADM_CMD_DEVICE_OPEN_V5:
+ case ADM_CMD_DEVICE_CLOSE_V5:
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+ return 0;
+
+ copp->stat = result->status;
+ wake_up(&copp->wait);
+ break;
+ case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+ case ADM_CMD_MATRIX_MAP_ROUTINGS:
+ adm->matrix_map_stat = result->status;
+=======
+ case ADM_CMD_DEVICE_OPEN_V5:
+ case ADM_CMD_DEVICE_CLOSE_V5:
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+ return 0;
+
+ copp->result = *result;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+ adm->result = *result;
+>>>>>>>
+ wake_up(&adm->matrix_map_wait);
+ break;
+
+ default:
+ dev_err(&adev->dev, "Unknown Cmd: 0x%x\n",
+ result->opcode);
+ break;
+ }
+ return 0;
+ }
+<<<<<<<
+=======
+ case ADM_CMDRSP_COPP_OPEN:
+>>>>>>>
+ case ADM_CMDRSP_DEVICE_OPEN_V5: {
+ struct adm_cmd_rsp_device_open_v5 {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+ } __packed * open = data->payload;
+
+<<<<<<<
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+=======
+ open = data->payload;
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+>>>>>>>
+ return 0;
+
+ if (open->copp_id == INVALID_COPP_ID) {
+ dev_err(&adev->dev, "Invalid coppid rxed %d\n",
+ open->copp_id);
+<<<<<<<
+ copp->result.status = ADSP_EBADPARAM;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ }
+ copp->result.opcode = hdr->opcode;
+ copp->id = open->copp_id;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+=======
+ copp->stat = ADSP_EBADPARAM;
+ wake_up(&copp->wait);
+ break;
+ }
+ copp->stat = result->opcode;
+ copp->id = open->copp_id;
+ wake_up(&copp->wait);
+>>>>>>>
+ }
+ break;
+ default:
+ dev_err(&adev->dev, "Unknown cmd:0x%x\n",
+<<<<<<<
+ data->opcode);
+=======
+ hdr->opcode);
+>>>>>>>
+ break;
+ }
+
+ return 0;
+}
+
+<<<<<<<
+static struct copp *adm_alloc_copp(struct q6adm *adm, int port_idx)
+{
+ struct copp *c;
+=======
+static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx)
+{
+ struct q6copp *c;
+>>>>>>>
+ int idx;
+
+ idx = find_first_zero_bit(&adm->copp_bitmap[port_idx],
+ MAX_COPPS_PER_PORT);
+
+ if (idx > MAX_COPPS_PER_PORT)
+ return ERR_PTR(-EBUSY);
+
+<<<<<<<
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+=======
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+>>>>>>>
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(idx, &adm->copp_bitmap[port_idx]);
+ c->copp_idx = idx;
+ c->afe_port = port_idx;
+ c->adm = adm;
+
+<<<<<<<
+ init_waitqueue_head(&c->wait);
+
+ return c;
+}
+
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp,
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct device *dev = adm->dev;
+ uint32_t opcode = pkt->hdr.opcode;
+ int ret;
+
+ mutex_lock(&adm->lock);
+ copp->result.opcode = 0;
+ copp->result.status = 0;
+ ret = apr_send_pkt(adm->apr, pkt);
+=======
+ mutex_init(&c->lock);
+ init_waitqueue_head(&c->wait);
+
+ spin_lock(&adm->copps_list_lock);
+ list_add_tail(&c->node, &adm->copps_list);
+ spin_unlock(&adm->copps_list_lock);
+
+ return c;
+}
+
+static void adm_free_copp(struct q6adm *adm, struct copp *c, int port_idx)
+{
+ clear_bit(c->copp_idx, &adm->copp_bitmap[port_idx]);
+ spin_lock(&adm->copps_list_lock);
+ list_del(&c->node);
+ spin_unlock(&adm->copps_list_lock);
+ kfree(c);
+}
+
+static struct copp *adm_find_matching_copp(struct q6adm *adm,
+ int port_id, int topology,
+ int mode, int rate, int channel_mode,
+ int bit_width, int app_type)
+{
+ struct copp *c;
+
+ spin_lock(&adm->copps_list_lock);
+
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_id == c->afe_port) && (topology == c->topology) &&
+ (mode == c->mode) && (rate == c->rate) &&
+ (bit_width == c->bit_width) && (app_type == c->app_type)) {
+ spin_unlock(&adm->copps_list_lock);
+ return c;
+ }
+ }
+ spin_unlock(&adm->copps_list_lock);
+
+ c = adm_alloc_copp(adm, port_id);
+ if (IS_ERR_OR_NULL(c))
+ return ERR_CAST(c);
+
+ mutex_lock(&c->lock);
+ c->refcnt = 0;
+ c->topology = topology;
+ c->mode = mode;
+ c->rate = rate;
+ c->channels = channel_mode;
+ c->bit_width = bit_width;
+ c->app_type = app_type;
+ mutex_unlock(&c->lock);
+
+ return c;
+
+}
+
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct copp *copp,
+ void *data)
+{
+ struct device *dev = adm->dev;
+ int ret;
+
+ mutex_lock(&copp->lock);
+ copp->stat = -1;
+ ret = apr_send_pkt(adm->apr, data);
+>>>>>>>
+ if (ret < 0) {
+ dev_err(dev, "Failed to send APR packet\n");
+ ret = -EINVAL;
+ goto err;
+ }
+<<<<<<<
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(copp->wait, copp->stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+		dev_err(dev, "ADM copp cmd timed out\n");
+ ret = -EINVAL;
+ } else if (copp->stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(copp->stat));
+ ret = q6dsp_errno(copp->stat);
+ }
+
+err:
+ mutex_unlock(&copp->lock);
+ return ret;
+}
+
+static int q6adm_device_open(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate)
+{
+ struct adm_cmd_device_open_v5 open = {0,};
+ int afe_port = q6afe_get_port_id(port_id);
+ int ret;
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ open.hdr.pkt_size = sizeof(open);
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = afe_port;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = afe_port;
+ open.hdr.token = port_id << 16 | copp->copp_idx;
+ open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+ open.flags = ADM_LEGACY_DEVICE_SESSION;
+ open.mode_of_operation = path;
+ open.endpoint_id_1 = afe_port;
+ open.topology_id = topology;
+ open.dev_num_channel = channel_mode & 0x00FF;
+ open.bit_width = bit_width;
+ open.sample_rate = rate;
+
+ ret = q6dsp_map_channels(&open.dev_channel_mapping[0],
+ channel_mode);
+ if (ret)
+ return ret;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &open);
+}
+
+static int q6adm_device_open_v1(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate)
+{
+ struct adm_copp_open_command_v1 open = {0,};
+ int afe_port = port_id;
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ open.hdr.pkt_size = sizeof(open);
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = afe_port;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = afe_port;
+ open.hdr.token = port_id << 16 | copp->copp_idx;
+ open.hdr.opcode = ADM_CMD_COPP_OPEN;
+
+ open.flags = ADM_LEGACY_DEVICE_SESSION;
+ open.mode = path;
+ open.endpoint_id1 = afe_port;
+ open.endpoint_id2 = 0xFFFF;
+ open.topology_id = topology;
+ open.channel_config = channel_mode & 0x00FF;
+ open.rate = rate;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &open);
+}
+=======
+
+ /* Wait for the callback with copp id */
+ if (rsp_opcode)
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode) ||
+ (copp->result.opcode == rsp_opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ else
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+		dev_err(dev, "ADM copp cmd timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (copp->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ copp->result.status);
+ ret = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&adm->lock);
+ return ret;
+}
+
+static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_pkt close;
+
+ close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.hdr.pkt_size = sizeof(close);
+ close.hdr.src_port = port_id;
+ close.hdr.dest_port = copp->id;
+ close.hdr.token = port_id << 16 | copp_idx;
+ close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close, 0);
+}
+
+static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm,
+ int port_id, int topology,
+ int mode, int rate,
+ int channel_mode, int bit_width,
+ int app_type)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_id == c->afe_port) && (topology == c->topology) &&
+ (mode == c->mode) && (rate == c->rate) &&
+ (bit_width == c->bit_width) && (app_type == c->app_type)) {
+ ret = c;
+ kref_get(&c->refcount);
+ }
+ }
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+}
+
+static int q6adm_device_open(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int path, int topology,
+ int channel_mode, int bit_width, int rate)
+{
+ struct q6adm_cmd_device_open_v5 *open;
+ int afe_port = q6afe_get_port_id(port_id);
+ struct apr_pkt *pkt;
+ void *p;
+ int ret, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = afe_port;
+ pkt->hdr.dest_port = afe_port;
+ pkt->hdr.token = port_id << 16 | copp->copp_idx;
+ pkt->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+ open->flags = ADM_LEGACY_DEVICE_SESSION;
+ open->mode_of_operation = path;
+ open->endpoint_id_1 = afe_port;
+ open->topology_id = topology;
+ open->dev_num_channel = channel_mode & 0x00FF;
+ open->bit_width = bit_width;
+ open->sample_rate = rate;
+
+ ret = q6dsp_map_channels(&open->dev_channel_mapping[0],
+ channel_mode);
+ if (ret)
+ goto err;
+
+ ret = q6adm_apr_send_copp_pkt(adm, copp, pkt,
+ ADM_CMDRSP_DEVICE_OPEN_V5);
+
+err:
+ kfree(pkt);
+ return ret;
+}
+
+>>>>>>>
+/**
+ * q6adm_open() - open adm and grab a free copp
+ *
+ * @dev: Pointer to adm child device.
+ * @port_id: port id
+ * @path: playback or capture path.
+ * @rate: rate at which copp is required.
+ * @channel_mode: channel mode
+ * @topology: adm topology id
+ * @perf_mode: performance mode.
+ * @bit_width: audio sample bit width
+ * @app_type: Application type.
+ * @acdb_id: ACDB id
+ *
+<<<<<<<
+ * Return: Will be a negative value on error or a valid copp index on success.
+ */
+int q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct copp *copp;
+=======
+ * Return: Will be an error pointer on error or a valid copp pointer on success.
+ */
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6copp *copp;
+ unsigned long flags;
+>>>>>>>
+ int ret = 0;
+
+ if (port_id < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+<<<<<<<
+ return -EINVAL;
+ }
+
+ copp = adm_find_matching_copp(adm, port_id, topology, perf_mode,
+ rate, channel_mode, bit_width, app_type);
+
+ /* Create a COPP if the port is not already enabled */
+ if (copp->refcnt == 0) {
+ ret = adm->ops->open(adm, copp, port_id, path, topology,
+ channel_mode, bit_width, rate);
+ if (ret < 0)
+ return ret;
+ }
+ mutex_lock(&copp->lock);
+ copp->refcnt++;
+ mutex_unlock(&copp->lock);
+
+ return copp->copp_idx;
+=======
+ return ERR_PTR(-EINVAL);
+ }
+
+ copp = q6adm_find_matching_copp(adm, port_id, topology, perf_mode,
+ rate, channel_mode, bit_width, app_type);
+ if (copp) {
+ dev_err(dev, "Found Matching Copp 0x%x\n", copp->copp_idx);
+ return copp;
+ }
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ copp = q6adm_alloc_copp(adm, port_id);
+ if (IS_ERR_OR_NULL(copp)) {
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ return ERR_CAST(copp);
+ }
+
+ list_add_tail(&copp->node, &adm->copps_list);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ kref_init(&copp->refcount);
+ copp->topology = topology;
+ copp->mode = perf_mode;
+ copp->rate = rate;
+ copp->channels = channel_mode;
+ copp->bit_width = bit_width;
+ copp->app_type = app_type;
+
+
+ ret = q6adm_device_open(adm, copp, port_id, path, topology,
+ channel_mode, bit_width, rate);
+ if (ret < 0) {
+ kref_put(&copp->refcount, q6adm_free_copp);
+ return ERR_PTR(ret);
+ }
+
+ return copp;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6adm_open);
+
+/**
+<<<<<<<
+ * q6adm_get_copp_id() - get copp index
+ *
+ * @copp: Pointer to valid copp
+ *
+ * Return: Will be a negative value on error or a valid copp index on success.
+ **/
+int q6adm_get_copp_id(struct q6copp *copp)
+{
+ if (!copp)
+ return -EINVAL;
+
+ return copp->copp_idx;
+}
+EXPORT_SYMBOL_GPL(q6adm_get_copp_id);
+
+/**
+ * q6adm_matrix_map() - Map asm streams and afe ports using payload
+ *
+ * @dev: Pointer to adm child device.
+ * @path: playback or capture path.
+ * @payload_map: map between session id and afe ports.
+ * @perf_mode: Performance mode.
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6adm_cmd_matrix_map_routings_v5 *route;
+ struct q6adm_session_map_node_v5 *node;
+ struct apr_pkt *pkt;
+ uint16_t *copps_list;
+ int pkt_size, ret, i, copp_idx;
+ void *matrix_map = NULL;
+ struct q6copp *copp;
+
+ /* Assumes port_ids have already been validated during adm_open */
+ pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) +
+ (sizeof(uint32_t) * payload_map.num_copps));
+
+ matrix_map = kzalloc(pkt_size, GFP_KERNEL);
+ if (!matrix_map)
+ return -ENOMEM;
+
+ pkt = matrix_map;
+ route = matrix_map + APR_HDR_SIZE;
+ node = matrix_map + APR_HDR_SIZE + sizeof(*route);
+ copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = 0;
+ pkt->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+=======
+ * q6adm_set_routing_data() - set routing private data
+ *
+ * @dev: Pointer to adm device.
+ * @data: routing private data
+ *
+ */
+void q6adm_set_routing_data(struct device *dev, void *data)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ adm->routing_data = data;
+}
+EXPORT_SYMBOL_GPL(q6adm_set_routing_data);
+
+/**
+ * q6adm_get_routing_data() - get routing private data
+ *
+ * @dev: Pointer to adm device.
+ *
+ * Return: pointer to routing private data
+ */
+void *q6adm_get_routing_data(struct device *dev)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ return adm->routing_data;
+}
+EXPORT_SYMBOL_GPL(q6adm_get_routing_data);
+
+int q6adm_matrix_map_v2(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct adm_cmd_matrix_map_routings_v5 *route;
+ struct adm_session_map_node_v5 *node;
+ uint16_t *copps_list;
+ int cmd_size, ret, i, copp_idx;
+ void *matrix_map = NULL;
+ struct copp *copp;
+
+ /* Assumes port_ids have already been validated during adm_open */
+ cmd_size = (sizeof(*route) +
+ sizeof(*node) +
+ (sizeof(uint32_t) * payload_map.num_copps));
+ matrix_map = kzalloc(cmd_size, GFP_KERNEL);
+ if (!matrix_map)
+ return -ENOMEM;
+
+ route = matrix_map;
+ route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ route->hdr.pkt_size = cmd_size;
+ route->hdr.src_svc = 0;
+ route->hdr.src_domain = APR_DOMAIN_APPS;
+ route->hdr.dest_svc = APR_SVC_ADM;
+ route->hdr.dest_domain = APR_DOMAIN_ADSP;
+ route->hdr.token = 0;
+ route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+>>>>>>>
+ route->num_sessions = 1;
+
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+ break;
+ default:
+ dev_err(dev, "Wrong path set[%d]\n", path);
+<<<<<<<
+ break;
+ }
+
+ node->session_id = payload_map.session_id;
+ node->num_copps = payload_map.num_copps;
+=======
+
+ break;
+ }
+
+ node = matrix_map + sizeof(*route);
+ node->session_id = payload_map.session_id;
+ node->num_copps = payload_map.num_copps;
+ copps_list = matrix_map + sizeof(*route) + sizeof(*node);
+>>>>>>>
+
+ for (i = 0; i < payload_map.num_copps; i++) {
+ int port_idx = payload_map.port_id[i];
+
+ if (port_idx < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n",
+ payload_map.port_id[i]);
+<<<<<<<
+ kfree(pkt);
+ return -EINVAL;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp) {
+ kfree(pkt);
+ return -EINVAL;
+ }
+
+ copps_list[i] = copp->id;
+ kref_put(&copp->refcount, q6adm_free_copp);
+ }
+
+ mutex_lock(&adm->lock);
+ adm->result.status = 0;
+ adm->result.opcode = 0;
+
+ ret = apr_send_pkt(adm->apr, pkt);
+ if (ret < 0) {
+ dev_err(dev, "routing for stream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->result.opcode == pkt->hdr.opcode,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for stream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ adm->result.status);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6adm_matrix_map);
+
+=======
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp)) {
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ copps_list[i] = copp->id;
+ }
+
+ mutex_lock(&adm->lock);
+ adm->matrix_map_stat = -1;
+
+ ret = apr_send_pkt(adm->apr, matrix_map);
+ if (ret < 0) {
+ dev_err(dev, "routing for syream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->matrix_map_stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for syream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->matrix_map_stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(adm->matrix_map_stat));
+ ret = q6dsp_errno(adm->matrix_map_stat);
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ kfree(matrix_map);
+ return ret;
+}
+
+int q6adm_matrix_map_v1(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct adm_routings_command route;
+ int ret, i, copp_idx;
+ struct copp *copp;
+ int copp_cnt = payload_map.num_copps;
+
+
+ route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ route.hdr.pkt_size = sizeof(route);
+ route.hdr.src_svc = 0;
+ route.hdr.src_domain = APR_DOMAIN_APPS;
+ route.hdr.dest_svc = APR_SVC_ADM;
+ route.hdr.dest_domain = APR_DOMAIN_ADSP;
+ route.hdr.token = 0;
+ route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
+ route.num_sessions = 1;
+ route.session[0].id = payload_map.session_id;
+ route.session[0].num_copps = copp_cnt;
+
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route.path = AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ route.path = AUDIO_TX;
+ break;
+ default:
+ dev_err(dev, "Wrong path set[%d]\n", path);
+
+ break;
+ }
+
+ for (i = 0; i < payload_map.num_copps; i++) {
+ int port_idx = payload_map.port_id[i];
+
+ if (port_idx < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n",
+ payload_map.port_id[i]);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp)) {
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ route.session[0].copp_id[i] = copp->id;
+ }
+
+ mutex_lock(&adm->lock);
+ adm->matrix_map_stat = -1;
+
+ ret = apr_send_pkt(adm->apr, &route);
+ if (ret < 0) {
+ dev_err(dev, "routing for syream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->matrix_map_stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for syream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->matrix_map_stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(adm->matrix_map_stat));
+ ret = q6dsp_errno(adm->matrix_map_stat);
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ return ret;
+}
+/**
+ * q6adm_matrix_map() - Map asm streams and afe ports using payload
+ *
+ * @dev: Pointer to adm child device.
+ * @path: playback or capture path.
+ * @payload_map: map between session id and afe ports.
+ * @perf_mode: Performance mode.
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ return adm->ops->matrix_map(dev, path, payload_map, perf_mode);
+}
+EXPORT_SYMBOL_GPL(q6adm_matrix_map);
+
+static int q6adm_device_close_v1(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_hdr close = {0};
+
+ close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.pkt_size = sizeof(close);
+ close.src_svc = APR_SVC_ADM;
+ close.src_domain = APR_DOMAIN_APPS;
+ close.src_port = port_id;
+ close.dest_svc = APR_SVC_ADM;
+ close.dest_domain = APR_DOMAIN_ADSP;
+ close.dest_port = copp->id;
+ close.token = port_id << 16 | copp_idx;
+ close.opcode = ADM_CMD_COPP_CLOSE;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close);
+}
+
+static int q6adm_device_close(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_hdr close = {0};
+
+ close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.pkt_size = sizeof(close);
+ close.src_svc = APR_SVC_ADM;
+ close.src_domain = APR_DOMAIN_APPS;
+ close.src_port = port_id;
+ close.dest_svc = APR_SVC_ADM;
+ close.dest_domain = APR_DOMAIN_ADSP;
+ close.dest_port = copp->id;
+ close.token = port_id << 16 | copp_idx;
+ close.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close);
+}
+
+>>>>>>>
+/**
+ * q6adm_close() - Close adm copp
+ *
+ * @dev: Pointer to adm child device.
+<<<<<<<
+ * @copp: pointer to previously opened copp
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_close(struct device *dev, struct q6copp *copp)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ int ret = 0;
+
+ ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx);
+ if (ret < 0) {
+ dev_err(adm->dev, "Failed to close copp %d\n", ret);
+ return ret;
+ }
+
+ kref_put(&copp->refcount, q6adm_free_copp);
+=======
+ * @port_id: afe port id.
+ * @perf_mode: performance mode
+ * @copp_idx: copp index to close
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_close(struct device *dev, int port_id, int perf_mode, int copp_idx)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct copp *copp;
+
+ if (port_id < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+ return -EINVAL;
+ }
+
+ if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+ dev_err(dev, "Invalid copp idx: %d\n", copp_idx);
+ return -EINVAL;
+ }
+
+ copp = adm_find_copp(adm, port_id, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+ return -EINVAL;
+
+ mutex_lock(&copp->lock);
+ copp->refcnt--;
+ mutex_unlock(&copp->lock);
+ if (!copp->refcnt) {
+ int ret;
+
+ ret = adm->ops->close(adm, copp, port_id, copp_idx);
+ if (ret < 0)
+ return ret;
+
+ adm_free_copp(adm, copp, port_id);
+ }
+>>>>>>>
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6adm_close);
+
+<<<<<<<
+static int q6adm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+=======
+struct q6adm_api_data q6admv2_ops = {
+ .open = q6adm_device_open,
+ .close = q6adm_device_close,
+ .matrix_map = q6adm_matrix_map_v2,
+};
+
+struct q6adm_api_data q6admv1_ops = {
+ .open = q6adm_device_open_v1,
+ .close = q6adm_device_close_v1,
+ .matrix_map = q6adm_matrix_map_v1,
+};
+
+static int q6adm_probe(struct apr_device *adev)
+{
+>>>>>>>
+ struct q6adm *adm;
+
+ adm = devm_kzalloc(&adev->dev, sizeof(*adm), GFP_KERNEL);
+ if (!adm)
+ return -ENOMEM;
+
+<<<<<<<
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x7)
+ adm->ops = &q6admv2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ adm->ops = &q6admv1_ops;
+
+ if (!adm->ops) {
+ dev_err(&adev->dev, "Unsupported ADM version\n");
+ return -EINVAL;
+ }
+
+ adm->apr = adev;
+ dev_set_drvdata(&adev->dev, adm);
+ adm->dev = &adev->dev;
+ adm->matrix_map_stat = 0;
+=======
+ adm->apr = adev;
+ dev_set_drvdata(&adev->dev, adm);
+ adm->dev = dev;
+ q6core_get_svc_api_info(adev->svc_id, &adm->ainfo);
+>>>>>>>
+ mutex_init(&adm->lock);
+ init_waitqueue_head(&adm->matrix_map_wait);
+
+ INIT_LIST_HEAD(&adm->copps_list);
+ spin_lock_init(&adm->copps_list_lock);
+
+<<<<<<<
+ dais_np = of_get_child_by_name(dev->of_node, "routing");
+ if (dais_np) {
+ adm->pdev_routing = of_platform_device_create(dais_np,
+ "q6routing", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6adm_remove(struct apr_device *adev)
+{
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (adm->pdev_routing)
+ of_platform_device_destroy(&adm->pdev_routing->dev, NULL);
+
+=======
+ return q6pcm_routing_probe(adm->dev);
+}
+
+static int q6adm_exit(struct apr_device *adev)
+{
+ q6pcm_routing_remove(&adev->dev);
+>>>>>>>
+ return 0;
+}
+
+static const struct of_device_id q6adm_device_id[] = {
+ { .compatible = "qcom,q6adm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6adm_device_id);
+
+static struct apr_driver qcom_q6adm_driver = {
+ .probe = q6adm_probe,
+<<<<<<<
+ .remove = q6adm_exit,
+ .callback = adm_callback,
+=======
+ .remove = q6adm_remove,
+ .callback = q6adm_callback,
+>>>>>>>
+ .driver = {
+ .name = "qcom-q6adm",
+ .of_match_table = of_match_ptr(q6adm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6adm_driver);
+MODULE_DESCRIPTION("Q6 Audio Device Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/thisimage b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/thisimage
new file mode 100644
index 0000000..e946d28
--- /dev/null
+++ b/rr-cache/da2f4fa23cdc2d2a73d81a6b53be31ece76d492c/thisimage
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+<<<<<<<
+=======
+#include <linux/of_platform.h>
+#include <linux/kref.h>
+>>>>>>>
+#include <linux/wait.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/platform_device.h>
+#include <sound/asound.h>
+#include "q6adm.h"
+#include "q6afe.h"
+#include "q6core.h"
+#include "q6dsp-errno.h"
+#include "q6dsp-common.h"
+
+#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
+#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
+#define ADM_CMD_DEVICE_CLOSE_V5 0x00010327
+#define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325
+
+#define TIMEOUT_MS 1000
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+/* Definition for a legacy device session. */
+#define ADM_LEGACY_DEVICE_SESSION 0
+#define ADM_MATRIX_ID_AUDIO_RX 0
+#define ADM_MATRIX_ID_AUDIO_TX 1
+<<<<<<<
+
+struct q6copp {
+ int afe_port;
+ int copp_idx;
+ int id;
+ int topology;
+ int mode;
+=======
+struct copp {
+ int afe_port;
+ int copp_idx;
+ int id;
+ int refcnt;
+ int topology;
+ int mode;
+ int stat;
+>>>>>>>
+ int rate;
+ int bit_width;
+ int channels;
+ int app_type;
+ int acdb_id;
+<<<<<<<
+ struct mutex lock;
+=======
+
+ struct aprv2_ibasic_rsp_result_t result;
+ struct kref refcount;
+>>>>>>>
+ wait_queue_head_t wait;
+ struct list_head node;
+ struct q6adm *adm;
+};
+
+<<<<<<<
+struct q6adm {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ unsigned long copp_bitmap[AFE_MAX_PORTS];
+ struct list_head copps_list;
+ spinlock_t copps_list_lock;
+ struct aprv2_ibasic_rsp_result_t result;
+ struct mutex lock;
+ wait_queue_head_t matrix_map_wait;
+ struct platform_device *pdev_routing;
+};
+
+struct q6adm_cmd_device_open_v5 {
+=======
+struct q6adm_api_data {
+ int (*open)(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate);
+ int (*matrix_map)(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode);
+ int (*close)(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx);
+
+};
+
+struct q6adm {
+ struct apr_device *apr;
+ struct device *dev;
+ unsigned long copp_bitmap[AFE_MAX_PORTS];
+ struct list_head copps_list;
+ spinlock_t copps_list_lock;
+ int matrix_map_stat;
+ struct mutex lock;
+ wait_queue_head_t matrix_map_wait;
+ void *routing_data;
+ struct q6adm_api_data *ops;
+};
+
+struct adm_cmd_device_open_v5 {
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 flags;
+ u16 mode_of_operation;
+ u16 endpoint_id_1;
+ u16 endpoint_id_2;
+ u32 topology_id;
+ u16 dev_num_channel;
+ u16 bit_width;
+ u32 sample_rate;
+ u8 dev_channel_mapping[8];
+} __packed;
+
+<<<<<<<
+struct adm_cmd_matrix_map_routings_v5 {
+ struct apr_hdr hdr;
+=======
+struct q6adm_cmd_matrix_map_routings_v5 {
+>>>>>>>
+ u32 matrix_id;
+ u32 num_sessions;
+} __packed;
+
+<<<<<<<
+struct adm_session_map_node_v5 {
+=======
+struct q6adm_session_map_node_v5 {
+>>>>>>>
+ u16 session_id;
+ u16 num_copps;
+} __packed;
+
+<<<<<<<
+/*** V1 ***/
+#define AUDIO_RX 0x0
+#define AUDIO_TX 0x1
+
+#define ADM_MAX_COPPS 5
+#define ADM_CMD_COPP_OPEN 0x00010304
+struct adm_copp_open_command_v1 {
+ struct apr_hdr hdr;
+ u16 flags;
+ u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+ u16 endpoint_id1;
+ u16 endpoint_id2;
+ u32 topology_id;
+ u16 channel_config;
+ u16 reserved;
+ u32 rate;
+} __attribute__ ((packed));
+
+#define ADM_CMD_COPP_CLOSE 0x00010305
+
+
+#define ADM_CMDRSP_COPP_OPEN 0x0001030A
+struct adm_copp_open_respond {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS 0x00010301
+struct adm_routings_session {
+ u16 id;
+ u16 num_copps;
+ u16 copp_id[ADM_MAX_COPPS+1]; /* Padding if numCopps is odd */
+} __packed;
+
+struct adm_routings_command {
+ struct apr_hdr hdr;
+ u32 path; /* 0 = Rx, 1 Tx */
+ u32 num_sessions;
+ struct adm_routings_session session[8];
+} __attribute__ ((packed));
+
+/*********/
+static struct copp *adm_find_copp(struct q6adm *adm, int port_idx,
+ int copp_idx)
+{
+ struct copp *c;
+
+ spin_lock(&adm->copps_list_lock);
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) {
+ spin_unlock(&adm->copps_list_lock);
+ return c;
+ }
+ }
+
+ spin_unlock(&adm->copps_list_lock);
+ return NULL;
+
+}
+
+static int adm_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result = data->payload;
+ int port_idx, copp_idx;
+ struct copp *copp;
+=======
+static struct q6copp *q6adm_find_copp(struct q6adm *adm, int port_idx,
+ int copp_idx)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) {
+ ret = c;
+ kref_get(&c->refcount);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+
+}
+
+static void q6adm_free_copp(struct kref *ref)
+{
+ struct q6copp *c = container_of(ref, struct q6copp, refcount);
+ struct q6adm *adm = c->adm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]);
+ list_del(&c->node);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ kfree(c);
+}
+
+static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct aprv2_ibasic_rsp_result_t *result = data->payload;
+ int port_idx, copp_idx;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6copp *copp;
+>>>>>>>
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (!data->payload_size)
+ return 0;
+
+<<<<<<<
+ copp_idx = (data->token) & 0XFF;
+ port_idx = ((data->token) >> 16) & 0xFF;
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ dev_err(&adev->dev, "Invalid port idx %d token %d\n",
+ port_idx, data->token);
+=======
+ copp_idx = (hdr->token) & 0XFF;
+ port_idx = ((hdr->token) >> 16) & 0xFF;
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ dev_err(&adev->dev, "Invalid port idx %d token %d\n",
+ port_idx, hdr->token);
+>>>>>>>
+ return 0;
+ }
+ if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ dev_err(&adev->dev, "Invalid copp idx %d token %d\n",
+<<<<<<<
+ copp_idx, data->token);
+ return 0;
+ }
+
+ switch (data->opcode) {
+=======
+ copp_idx, hdr->token);
+ return 0;
+ }
+
+ switch (hdr->opcode) {
+>>>>>>>
+ case APR_BASIC_RSP_RESULT: {
+ if (result->status != 0) {
+ dev_err(&adev->dev, "cmd = 0x%x return error = 0x%x\n",
+ result->opcode, result->status);
+ }
+ switch (result->opcode) {
+<<<<<<<
+ case ADM_CMD_COPP_OPEN:
+ case ADM_CMD_COPP_CLOSE:
+ case ADM_CMD_DEVICE_OPEN_V5:
+ case ADM_CMD_DEVICE_CLOSE_V5:
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+ return 0;
+
+ copp->stat = result->status;
+ wake_up(&copp->wait);
+ break;
+ case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+ case ADM_CMD_MATRIX_MAP_ROUTINGS:
+ adm->matrix_map_stat = result->status;
+=======
+ case ADM_CMD_DEVICE_OPEN_V5:
+ case ADM_CMD_DEVICE_CLOSE_V5:
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+ return 0;
+
+ copp->result = *result;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+ adm->result = *result;
+>>>>>>>
+ wake_up(&adm->matrix_map_wait);
+ break;
+
+ default:
+ dev_err(&adev->dev, "Unknown Cmd: 0x%x\n",
+ result->opcode);
+ break;
+ }
+ return 0;
+ }
+<<<<<<<
+=======
+ case ADM_CMDRSP_COPP_OPEN:
+>>>>>>>
+ case ADM_CMDRSP_DEVICE_OPEN_V5: {
+ struct adm_cmd_rsp_device_open_v5 {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+ } __packed * open = data->payload;
+
+<<<<<<<
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp)
+=======
+ open = data->payload;
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+>>>>>>>
+ return 0;
+
+ if (open->copp_id == INVALID_COPP_ID) {
+ dev_err(&adev->dev, "Invalid coppid rxed %d\n",
+ open->copp_id);
+<<<<<<<
+ copp->result.status = ADSP_EBADPARAM;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+ break;
+ }
+ copp->result.opcode = hdr->opcode;
+ copp->id = open->copp_id;
+ wake_up(&copp->wait);
+ kref_put(&copp->refcount, q6adm_free_copp);
+=======
+ copp->stat = ADSP_EBADPARAM;
+ wake_up(&copp->wait);
+ break;
+ }
+ copp->stat = result->opcode;
+ copp->id = open->copp_id;
+ wake_up(&copp->wait);
+>>>>>>>
+ }
+ break;
+ default:
+ dev_err(&adev->dev, "Unknown cmd:0x%x\n",
+<<<<<<<
+ data->opcode);
+=======
+ hdr->opcode);
+>>>>>>>
+ break;
+ }
+
+ return 0;
+}
+
+<<<<<<<
+static struct copp *adm_alloc_copp(struct q6adm *adm, int port_idx)
+{
+ struct copp *c;
+=======
+static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx)
+{
+ struct q6copp *c;
+>>>>>>>
+ int idx;
+
+ idx = find_first_zero_bit(&adm->copp_bitmap[port_idx],
+ MAX_COPPS_PER_PORT);
+
+ if (idx > MAX_COPPS_PER_PORT)
+ return ERR_PTR(-EBUSY);
+
+<<<<<<<
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+=======
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+>>>>>>>
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(idx, &adm->copp_bitmap[port_idx]);
+ c->copp_idx = idx;
+ c->afe_port = port_idx;
+ c->adm = adm;
+
+<<<<<<<
+ init_waitqueue_head(&c->wait);
+
+ return c;
+}
+
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp,
+ struct apr_pkt *pkt, uint32_t rsp_opcode)
+{
+ struct device *dev = adm->dev;
+ uint32_t opcode = pkt->hdr.opcode;
+ int ret;
+
+ mutex_lock(&adm->lock);
+ copp->result.opcode = 0;
+ copp->result.status = 0;
+ ret = apr_send_pkt(adm->apr, pkt);
+=======
+ mutex_init(&c->lock);
+ init_waitqueue_head(&c->wait);
+
+ spin_lock(&adm->copps_list_lock);
+ list_add_tail(&c->node, &adm->copps_list);
+ spin_unlock(&adm->copps_list_lock);
+
+ return c;
+}
+
+static void adm_free_copp(struct q6adm *adm, struct copp *c, int port_idx)
+{
+ clear_bit(c->copp_idx, &adm->copp_bitmap[port_idx]);
+ spin_lock(&adm->copps_list_lock);
+ list_del(&c->node);
+ spin_unlock(&adm->copps_list_lock);
+ kfree(c);
+}
+
+static struct copp *adm_find_matching_copp(struct q6adm *adm,
+ int port_id, int topology,
+ int mode, int rate, int channel_mode,
+ int bit_width, int app_type)
+{
+ struct copp *c;
+
+ spin_lock(&adm->copps_list_lock);
+
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_id == c->afe_port) && (topology == c->topology) &&
+ (mode == c->mode) && (rate == c->rate) &&
+ (bit_width == c->bit_width) && (app_type == c->app_type)) {
+ spin_unlock(&adm->copps_list_lock);
+ return c;
+ }
+ }
+ spin_unlock(&adm->copps_list_lock);
+
+ c = adm_alloc_copp(adm, port_id);
+ if (IS_ERR_OR_NULL(c))
+ return ERR_CAST(c);
+
+ mutex_lock(&c->lock);
+ c->refcnt = 0;
+ c->topology = topology;
+ c->mode = mode;
+ c->rate = rate;
+ c->channels = channel_mode;
+ c->bit_width = bit_width;
+ c->app_type = app_type;
+ mutex_unlock(&c->lock);
+
+ return c;
+
+}
+
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct copp *copp,
+ void *data)
+{
+ struct device *dev = adm->dev;
+ int ret;
+
+ mutex_lock(&copp->lock);
+ copp->stat = -1;
+ ret = apr_send_pkt(adm->apr, data);
+>>>>>>>
+ if (ret < 0) {
+ dev_err(dev, "Failed to send APR packet\n");
+ ret = -EINVAL;
+ goto err;
+ }
+<<<<<<<
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(copp->wait, copp->stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "ADM copp cmd timedout\n");
+ ret = -EINVAL;
+ } else if (copp->stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(copp->stat));
+ ret = q6dsp_errno(copp->stat);
+ }
+
+err:
+ mutex_unlock(&copp->lock);
+ return ret;
+}
+
+static int q6adm_device_open(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate)
+{
+ struct adm_cmd_device_open_v5 open = {0,};
+ int afe_port = q6afe_get_port_id(port_id);
+ int ret;
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ open.hdr.pkt_size = sizeof(open);
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = afe_port;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = afe_port;
+ open.hdr.token = port_id << 16 | copp->copp_idx;
+ open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+ open.flags = ADM_LEGACY_DEVICE_SESSION;
+ open.mode_of_operation = path;
+ open.endpoint_id_1 = afe_port;
+ open.topology_id = topology;
+ open.dev_num_channel = channel_mode & 0x00FF;
+ open.bit_width = bit_width;
+ open.sample_rate = rate;
+
+ ret = q6dsp_map_channels(&open.dev_channel_mapping[0],
+ channel_mode);
+ if (ret)
+ return ret;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &open);
+}
+
+static int q6adm_device_open_v1(struct q6adm *adm, struct copp *copp, int port_id,
+ int path, int topology, int channel_mode,
+ int bit_width, int rate)
+{
+ struct adm_copp_open_command_v1 open = {0,};
+ int afe_port = port_id;
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ open.hdr.pkt_size = sizeof(open);
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = afe_port;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = afe_port;
+ open.hdr.token = port_id << 16 | copp->copp_idx;
+ open.hdr.opcode = ADM_CMD_COPP_OPEN;
+
+ open.flags = ADM_LEGACY_DEVICE_SESSION;
+ open.mode = path;
+ open.endpoint_id1 = afe_port;
+ open.endpoint_id2 = 0xFFFF;
+ open.topology_id = topology;
+ open.channel_config = channel_mode & 0x00FF;
+ open.rate = rate;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &open);
+}
+=======
+
+ /* Wait for the callback with copp id */
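+ /*
+ * Open commands complete with a dedicated response opcode
+ * (ADM_CMDRSP_DEVICE_OPEN_V5), so accept either the command
+ * opcode or that response opcode when one is supplied.
+ */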
+ if (rsp_opcode)
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode) ||
+ (copp->result.opcode == rsp_opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ else
+ ret = wait_event_timeout(copp->wait,
+ (copp->result.opcode == opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ dev_err(dev, "ADM copp cmd timedout\n");
+ ret = -ETIMEDOUT;
+ } else if (copp->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ copp->result.status);
+ ret = -EINVAL;
+ }
+
+err:
+ mutex_unlock(&adm->lock);
+ return ret;
+}
+
+static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_pkt close;
+
+ close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.hdr.pkt_size = sizeof(close);
+ close.hdr.src_port = port_id;
+ close.hdr.dest_port = copp->id;
+ close.hdr.token = port_id << 16 | copp_idx;
+ close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close, 0);
+}
+
+static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm,
+ int port_id, int topology,
+ int mode, int rate,
+ int channel_mode, int bit_width,
+ int app_type)
+{
+ struct q6copp *c = NULL;
+ struct q6copp *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+
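+ /*
+ * If a matching copp already exists, take a reference under the
+ * lock so it cannot be freed before the caller is done with it.
+ */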
+ list_for_each_entry(c, &adm->copps_list, node) {
+ if ((port_id == c->afe_port) && (topology == c->topology) &&
+ (mode == c->mode) && (rate == c->rate) &&
+ (bit_width == c->bit_width) && (app_type == c->app_type)) {
+ ret = c;
+ kref_get(&c->refcount);
+ }
+ }
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ return ret;
+}
+
+static int q6adm_device_open(struct q6adm *adm, struct q6copp *copp,
+ int port_id, int path, int topology,
+ int channel_mode, int bit_width, int rate)
+{
+ struct q6adm_cmd_device_open_v5 *open;
+ int afe_port = q6afe_get_port_id(port_id);
+ struct apr_pkt *pkt;
+ void *p;
+ int ret, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*open);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ open = p + APR_HDR_SIZE;
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = afe_port;
+ pkt->hdr.dest_port = afe_port;
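+ /*
+ * Pack the port id in the upper 16 bits and the copp index in the
+ * low byte so q6adm_callback() can locate this copp again.
+ */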
+ pkt->hdr.token = port_id << 16 | copp->copp_idx;
+ pkt->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+ open->flags = ADM_LEGACY_DEVICE_SESSION;
+ open->mode_of_operation = path;
+ open->endpoint_id_1 = afe_port;
+ open->topology_id = topology;
+ open->dev_num_channel = channel_mode & 0x00FF;
+ open->bit_width = bit_width;
+ open->sample_rate = rate;
+
+ ret = q6dsp_map_channels(&open->dev_channel_mapping[0],
+ channel_mode);
+ if (ret)
+ goto err;
+
+ ret = q6adm_apr_send_copp_pkt(adm, copp, pkt,
+ ADM_CMDRSP_DEVICE_OPEN_V5);
+
+err:
+ kfree(pkt);
+ return ret;
+}
+
+>>>>>>>
+/**
+ * q6adm_open() - open adm and grab a free copp
+ *
+ * @dev: Pointer to adm child device.
+ * @port_id: port id
+ * @path: playback or capture path.
+ * @rate: rate at which copp is required.
+ * @channel_mode: channel mode
+ * @topology: adm topology id
+ * @perf_mode: performance mode.
+ * @bit_width: audio sample bit width
+ * @app_type: Application type.
+ * @acdb_id: ACDB id
+ *
+<<<<<<<
+ * Return: Will be a negative value on error or a valid copp index on success.
+ */
+int q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct copp *copp;
+=======
+ * Return: Will be an error pointer on error or a valid copp pointer on success.
+ */
+struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
+ int channel_mode, int topology, int perf_mode,
+ uint16_t bit_width, int app_type, int acdb_id)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6copp *copp;
+ unsigned long flags;
+>>>>>>>
+ int ret = 0;
+
+ if (port_id < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+<<<<<<<
+ return -EINVAL;
+ }
+
+ copp = adm_find_matching_copp(adm, port_id, topology, perf_mode,
+ rate, channel_mode, bit_width, app_type);
+
+ /* Create a COPP if the port is not already enabled */
+ if (copp->refcnt == 0) {
+ ret = adm->ops->open(adm, copp, port_id, path, topology,
+ channel_mode, bit_width, rate);
+ if (ret < 0)
+ return ret;
+ }
+ mutex_lock(&copp->lock);
+ copp->refcnt++;
+ mutex_unlock(&copp->lock);
+
+ return copp->copp_idx;
+=======
+ return ERR_PTR(-EINVAL);
+ }
+
+ copp = q6adm_find_matching_copp(adm, port_id, topology, perf_mode,
+ rate, channel_mode, bit_width, app_type);
+ if (copp) {
+ dev_err(dev, "Found Matching Copp 0x%x\n", copp->copp_idx);
+ return copp;
+ }
+
+ spin_lock_irqsave(&adm->copps_list_lock, flags);
+ copp = q6adm_alloc_copp(adm, port_id);
+ if (IS_ERR_OR_NULL(copp)) {
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+ return ERR_CAST(copp);
+ }
+
+ list_add_tail(&copp->node, &adm->copps_list);
+ spin_unlock_irqrestore(&adm->copps_list_lock, flags);
+
+ kref_init(&copp->refcount);
+ copp->topology = topology;
+ copp->mode = perf_mode;
+ copp->rate = rate;
+ copp->channels = channel_mode;
+ copp->bit_width = bit_width;
+ copp->app_type = app_type;
+
+
+ ret = q6adm_device_open(adm, copp, port_id, path, topology,
+ channel_mode, bit_width, rate);
+ if (ret < 0) {
+ kref_put(&copp->refcount, q6adm_free_copp);
+ return ERR_PTR(ret);
+ }
+
+ return copp;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6adm_open);
+
+/**
+<<<<<<<
+ * q6adm_get_copp_id() - get copp index
+ *
+ * @copp: Pointer to valid copp
+ *
+ * Return: Will be a negative value on error or a valid copp index on success.
+ **/
+int q6adm_get_copp_id(struct q6copp *copp)
+{
+ if (!copp)
+ return -EINVAL;
+
+ return copp->copp_idx;
+}
+EXPORT_SYMBOL_GPL(q6adm_get_copp_id);
+
+/**
+ * q6adm_matrix_map() - Map asm streams and afe ports using payload
+ *
+ * @dev: Pointer to adm child device.
+ * @path: playback or capture path.
+ * @payload_map: map between session id and afe ports.
+ * @perf_mode: Performance mode.
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ struct q6adm_cmd_matrix_map_routings_v5 *route;
+ struct q6adm_session_map_node_v5 *node;
+ struct apr_pkt *pkt;
+ uint16_t *copps_list;
+ int pkt_size, ret, i, copp_idx;
+ void *matrix_map = NULL;
+ struct q6copp *copp;
+
+ /* Assumes port_ids have already been validated during adm_open */
+ pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) +
+ (sizeof(uint32_t) * payload_map.num_copps));
+
+ matrix_map = kzalloc(pkt_size, GFP_KERNEL);
+ if (!matrix_map)
+ return -ENOMEM;
+
+ pkt = matrix_map;
+ route = matrix_map + APR_HDR_SIZE;
+ node = matrix_map + APR_HDR_SIZE + sizeof(*route);
+ copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.token = 0;
+ pkt->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+=======
+ * q6adm_set_routing_data() - set routing private data
+ *
+ * @dev: Pointer to adm device.
+ * @data: routing private data
+ *
+ */
+void q6adm_set_routing_data(struct device *dev, void *data)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ adm->routing_data = data;
+}
+EXPORT_SYMBOL_GPL(q6adm_set_routing_data);
+
+/**
+ * q6adm_get_routing_data() - get routing private data
+ *
+ * @dev: Pointer to adm device.
+ *
+ * Return: pointer to routing private data
+ */
+void *q6adm_get_routing_data(struct device *dev)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ return adm->routing_data;
+}
+EXPORT_SYMBOL_GPL(q6adm_get_routing_data);
+
+int q6adm_matrix_map_v2(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct adm_cmd_matrix_map_routings_v5 *route;
+ struct adm_session_map_node_v5 *node;
+ uint16_t *copps_list;
+ int cmd_size, ret, i, copp_idx;
+ void *matrix_map = NULL;
+ struct copp *copp;
+
+ /* Assumes port_ids have already been validated during adm_open */
+ cmd_size = (sizeof(*route) +
+ sizeof(*node) +
+ (sizeof(uint32_t) * payload_map.num_copps));
+ matrix_map = kzalloc(cmd_size, GFP_KERNEL);
+ if (!matrix_map)
+ return -ENOMEM;
+
+ route = matrix_map;
+ route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ route->hdr.pkt_size = cmd_size;
+ route->hdr.src_svc = 0;
+ route->hdr.src_domain = APR_DOMAIN_APPS;
+ route->hdr.dest_svc = APR_SVC_ADM;
+ route->hdr.dest_domain = APR_DOMAIN_ADSP;
+ route->hdr.token = 0;
+ route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+>>>>>>>
+ route->num_sessions = 1;
+
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+ break;
+ default:
+ dev_err(dev, "Wrong path set[%d]\n", path);
+<<<<<<<
+ break;
+ }
+
+ node->session_id = payload_map.session_id;
+ node->num_copps = payload_map.num_copps;
+=======
+
+ break;
+ }
+
+ node = matrix_map + sizeof(*route);
+ node->session_id = payload_map.session_id;
+ node->num_copps = payload_map.num_copps;
+ copps_list = matrix_map + sizeof(*route) + sizeof(*node);
+>>>>>>>
+
+ for (i = 0; i < payload_map.num_copps; i++) {
+ int port_idx = payload_map.port_id[i];
+
+ if (port_idx < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n",
+ payload_map.port_id[i]);
+<<<<<<<
+ kfree(pkt);
+ return -EINVAL;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = q6adm_find_copp(adm, port_idx, copp_idx);
+ if (!copp) {
+ kfree(pkt);
+ return -EINVAL;
+ }
+
+ copps_list[i] = copp->id;
+ kref_put(&copp->refcount, q6adm_free_copp);
+ }
+
+ mutex_lock(&adm->lock);
+ adm->result.status = 0;
+ adm->result.opcode = 0;
+
+ ret = apr_send_pkt(adm->apr, pkt);
+ if (ret < 0) {
+ dev_err(dev, "routing for stream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->result.opcode == pkt->hdr.opcode,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for stream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->result.status > 0) {
+ dev_err(dev, "DSP returned error[%d]\n",
+ adm->result.status);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6adm_matrix_map);
+
+=======
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp)) {
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ copps_list[i] = copp->id;
+ }
+
+ mutex_lock(&adm->lock);
+ adm->matrix_map_stat = -1;
+
+ ret = apr_send_pkt(adm->apr, matrix_map);
+ if (ret < 0) {
+ dev_err(dev, "routing for syream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->matrix_map_stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for syream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->matrix_map_stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(adm->matrix_map_stat));
+ ret = q6dsp_errno(adm->matrix_map_stat);
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ kfree(matrix_map);
+ return ret;
+}
+
+int q6adm_matrix_map_v1(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct adm_routings_command route;
+ int ret, i, copp_idx;
+ struct copp *copp;
+ int copp_cnt = payload_map.num_copps;
+
+
+ route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ route.hdr.pkt_size = sizeof(route);
+ route.hdr.src_svc = 0;
+ route.hdr.src_domain = APR_DOMAIN_APPS;
+ route.hdr.dest_svc = APR_SVC_ADM;
+ route.hdr.dest_domain = APR_DOMAIN_ADSP;
+ route.hdr.token = 0;
+ route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
+ route.num_sessions = 1;
+ route.session[0].id = payload_map.session_id;
+ route.session[0].num_copps = copp_cnt;
+
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route.path = AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ route.path = AUDIO_TX;
+ break;
+ default:
+ dev_err(dev, "Wrong path set[%d]\n", path);
+
+ break;
+ }
+
+ for (i = 0; i < payload_map.num_copps; i++) {
+ int port_idx = payload_map.port_id[i];
+
+ if (port_idx < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n",
+ payload_map.port_id[i]);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ copp_idx = payload_map.copp_idx[i];
+
+ copp = adm_find_copp(adm, port_idx, copp_idx);
+ if (IS_ERR_OR_NULL(copp)) {
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ route.session[0].copp_id[i] = copp->id;
+ }
+
+ mutex_lock(&adm->lock);
+ adm->matrix_map_stat = -1;
+
+ ret = apr_send_pkt(adm->apr, &route);
+ if (ret < 0) {
+ dev_err(dev, "routing for syream %d failed ret %d\n",
+ payload_map.session_id, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(adm->matrix_map_wait,
+ adm->matrix_map_stat >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev, "routing for syream %d failed\n",
+ payload_map.session_id);
+ ret = -ETIMEDOUT;
+ goto fail_cmd;
+ } else if (adm->matrix_map_stat > 0) {
+ dev_err(dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(adm->matrix_map_stat));
+ ret = q6dsp_errno(adm->matrix_map_stat);
+ goto fail_cmd;
+ }
+
+fail_cmd:
+ mutex_unlock(&adm->lock);
+ return ret;
+}
+/**
+ * q6adm_matrix_map() - Map asm streams and afe ports using payload
+ *
+ * @dev: Pointer to adm child device.
+ * @path: playback or capture path.
+ * @payload_map: map between session id and afe ports.
+ * @perf_mode: Performance mode.
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_matrix_map(struct device *dev, int path,
+ struct route_payload payload_map, int perf_mode)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+
+ return adm->ops->matrix_map(dev, path, payload_map, perf_mode);
+}
+EXPORT_SYMBOL_GPL(q6adm_matrix_map);
+
+static int q6adm_device_close_v1(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_hdr close = {0};
+
+ close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.pkt_size = sizeof(close);
+ close.src_svc = APR_SVC_ADM;
+ close.src_domain = APR_DOMAIN_APPS;
+ close.src_port = port_id;
+ close.dest_svc = APR_SVC_ADM;
+ close.dest_domain = APR_DOMAIN_ADSP;
+ close.dest_port = copp->id;
+ close.token = port_id << 16 | copp_idx;
+ close.opcode = ADM_CMD_COPP_CLOSE;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close);
+}
+
+static int q6adm_device_close(struct q6adm *adm, struct copp *copp,
+ int port_id, int copp_idx)
+{
+ struct apr_hdr close = {0};
+
+ close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ close.pkt_size = sizeof(close);
+ close.src_svc = APR_SVC_ADM;
+ close.src_domain = APR_DOMAIN_APPS;
+ close.src_port = port_id;
+ close.dest_svc = APR_SVC_ADM;
+ close.dest_domain = APR_DOMAIN_ADSP;
+ close.dest_port = copp->id;
+ close.token = port_id << 16 | copp_idx;
+ close.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+ return q6adm_apr_send_copp_pkt(adm, copp, &close);
+}
+
+>>>>>>>
+/**
+ * q6adm_close() - Close adm copp
+ *
+ * @dev: Pointer to adm child device.
+<<<<<<<
+ * @copp: pointer to previously opened copp
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_close(struct device *dev, struct q6copp *copp)
+{
+ struct q6adm *adm = dev_get_drvdata(dev->parent);
+ int ret = 0;
+
+ ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx);
+ if (ret < 0) {
+ dev_err(adm->dev, "Failed to close copp %d\n", ret);
+ return ret;
+ }
+
+ kref_put(&copp->refcount, q6adm_free_copp);
+=======
+ * @port_id: afe port id.
+ * @perf_mode: performance mode
+ * @copp_idx: copp index to close
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6adm_close(struct device *dev, int port_id, int perf_mode, int copp_idx)
+{
+ struct q6adm *adm = dev_get_drvdata(dev);
+ struct copp *copp;
+
+ if (port_id < 0) {
+ dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+ return -EINVAL;
+ }
+
+ if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+ dev_err(dev, "Invalid copp idx: %d\n", copp_idx);
+ return -EINVAL;
+ }
+
+ copp = adm_find_copp(adm, port_id, copp_idx);
+ if (IS_ERR_OR_NULL(copp))
+ return -EINVAL;
+
+ mutex_lock(&copp->lock);
+ copp->refcnt--;
+ mutex_unlock(&copp->lock);
+ if (!copp->refcnt) {
+ int ret;
+
+ ret = adm->ops->close(adm, copp, port_id, copp_idx);
+ if (ret < 0)
+ return ret;
+
+ adm_free_copp(adm, copp, port_id);
+ }
+>>>>>>>
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6adm_close);
+
+<<<<<<<
+static int q6adm_probe(struct apr_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+=======
+struct q6adm_api_data q6admv2_ops = {
+ .open = q6adm_device_open,
+ .close = q6adm_device_close,
+ .matrix_map = q6adm_matrix_map_v2,
+};
+
+struct q6adm_api_data q6admv1_ops = {
+ .open = q6adm_device_open_v1,
+ .close = q6adm_device_close_v1,
+ .matrix_map = q6adm_matrix_map_v1,
+};
+
+static int q6adm_probe(struct apr_device *adev)
+{
+>>>>>>>
+ struct q6adm *adm;
+
+ adm = devm_kzalloc(&adev->dev, sizeof(*adm), GFP_KERNEL);
+ if (!adm)
+ return -ENOMEM;
+
+<<<<<<<
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x7)
+ adm->ops = &q6admv2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ adm->ops = &q6admv1_ops;
+
+ if (!adm->ops) {
+ dev_err(&adev->dev, "Unsupported ADM version\n");
+ return -EINVAL;
+ }
+
+ adm->apr = adev;
+ dev_set_drvdata(&adev->dev, adm);
+ adm->dev = &adev->dev;
+ adm->matrix_map_stat = 0;
+=======
+ adm->apr = adev;
+ dev_set_drvdata(&adev->dev, adm);
+ adm->dev = dev;
+ q6core_get_svc_api_info(adev->svc_id, &adm->ainfo);
+>>>>>>>
+ mutex_init(&adm->lock);
+ init_waitqueue_head(&adm->matrix_map_wait);
+
+ INIT_LIST_HEAD(&adm->copps_list);
+ spin_lock_init(&adm->copps_list_lock);
+
+<<<<<<<
+ dais_np = of_get_child_by_name(dev->of_node, "routing");
+ if (dais_np) {
+ adm->pdev_routing = of_platform_device_create(dais_np,
+ "q6routing", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6adm_remove(struct apr_device *adev)
+{
+ struct q6adm *adm = dev_get_drvdata(&adev->dev);
+
+ if (adm->pdev_routing)
+ of_platform_device_destroy(&adm->pdev_routing->dev, NULL);
+
+=======
+ return q6pcm_routing_probe(adm->dev);
+}
+
+static int q6adm_exit(struct apr_device *adev)
+{
+ q6pcm_routing_remove(&adev->dev);
+>>>>>>>
+ return 0;
+}
+
+static const struct of_device_id q6adm_device_id[] = {
+ { .compatible = "qcom,q6adm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6adm_device_id);
+
+static struct apr_driver qcom_q6adm_driver = {
+ .probe = q6adm_probe,
+<<<<<<<
+ .remove = q6adm_exit,
+ .callback = adm_callback,
+=======
+ .remove = q6adm_remove,
+ .callback = q6adm_callback,
+>>>>>>>
+ .driver = {
+ .name = "qcom-q6adm",
+ .of_match_table = of_match_ptr(q6adm_device_id),
+ },
+};
+
+module_apr_driver(qcom_q6adm_driver);
+MODULE_DESCRIPTION("Q6 Audio Device Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/postimage b/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/postimage
new file mode 100644
index 0000000..f3fdf4a
--- /dev/null
+++ b/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/postimage
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ i2c9 = &i2c9;
+ i2c10 = &i2c10;
+ i2c11 = &i2c11;
+ i2c12 = &i2c12;
+ i2c13 = &i2c13;
+ i2c14 = &i2c14;
+ i2c15 = &i2c15;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ spi6 = &spi6;
+ spi7 = &spi7;
+ spi8 = &spi8;
+ spi9 = &spi9;
+ spi10 = &spi10;
+ spi11 = &spi11;
+ spi12 = &spi12;
+ spi13 = &spi13;
+ spi14 = &spi14;
+ spi15 = &spi15;
+ };
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ qupv3_id_0: geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x8c0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ i2c0: i2c@880000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x880000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c0_default>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi0: spi@880000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x880000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi0_default>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@884000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x884000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c1_default>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@884000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x884000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi1_default>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@888000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x888000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c2_default>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@888000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x888000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi2_default>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@88c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x88c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c3_default>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@88c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x88c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi3_default>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@890000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x890000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c4_default>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi4: spi@890000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x890000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi4_default>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@894000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x894000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi5: spi@894000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x894000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@898000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x898000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c6_default>;
+ interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi6: spi@898000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x898000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi6_default>;
+ interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@89c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x89c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c7_default>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi7: spi@89c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x89c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi7_default>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0xac0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ i2c8: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa80000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c8_default>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi8: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa80000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi8_default>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c9: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi9: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart9: serial@a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c10_default>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi10: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi10_default>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c11: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa8c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c11_default>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi11: spi@a8c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa8c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi11_default>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c12: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa90000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c12_default>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi12: spi@a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa90000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi12_default>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c13: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa94000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c13_default>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi13: spi@a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa94000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi13_default>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c14: i2c@a98000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa98000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c14_default>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi14: spi@a98000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa98000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi14_default>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c15: i2c@a9c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa9c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c15_default>;
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi15: spi@a9c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa9c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi15_default>;
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ qup_i2c0_default: qup-i2c0-default {
+ pinmux {
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ };
+ };
+
+ qup_i2c1_default: qup-i2c1-default {
+ pinmux {
+ pins = "gpio17", "gpio18";
+ function = "qup1";
+ };
+ };
+
+ qup_i2c2_default: qup-i2c2-default {
+ pinmux {
+ pins = "gpio27", "gpio28";
+ function = "qup2";
+ };
+ };
+
+ qup_i2c3_default: qup-i2c3-default {
+ pinmux {
+ pins = "gpio41", "gpio42";
+ function = "qup3";
+ };
+ };
+
+ qup_i2c4_default: qup-i2c4-default {
+ pinmux {
+ pins = "gpio89", "gpio90";
+ function = "qup4";
+ };
+ };
+
+ qup_i2c5_default: qup-i2c5-default {
+ pinmux {
+ pins = "gpio85", "gpio86";
+ function = "qup5";
+ };
+ };
+
+ qup_i2c6_default: qup-i2c6-default {
+ pinmux {
+ pins = "gpio45", "gpio46";
+ function = "qup6";
+ };
+ };
+
+ qup_i2c7_default: qup-i2c7-default {
+ pinmux {
+ pins = "gpio93", "gpio94";
+ function = "qup7";
+ };
+ };
+
+ qup_i2c8_default: qup-i2c8-default {
+ pinmux {
+ pins = "gpio65", "gpio66";
+ function = "qup8";
+ };
+ };
+
+ qup_i2c9_default: qup-i2c9-default {
+ pinmux {
+ pins = "gpio6", "gpio7";
+ function = "qup9";
+ };
+ };
+
+ qup_i2c10_default: qup-i2c10-default {
+ pinmux {
+ pins = "gpio55", "gpio56";
+ function = "qup10";
+ };
+ };
+
+ qup_i2c11_default: qup-i2c11-default {
+ pinmux {
+ pins = "gpio31", "gpio32";
+ function = "qup11";
+ };
+ };
+
+ qup_i2c12_default: qup-i2c12-default {
+ pinmux {
+ pins = "gpio49", "gpio50";
+ function = "qup12";
+ };
+ };
+
+ qup_i2c13_default: qup-i2c13-default {
+ pinmux {
+ pins = "gpio105", "gpio106";
+ function = "qup13";
+ };
+ };
+
+ qup_i2c14_default: qup-i2c14-default {
+ pinmux {
+ pins = "gpio33", "gpio34";
+ function = "qup14";
+ };
+ };
+
+ qup_i2c15_default: qup-i2c15-default {
+ pinmux {
+ pins = "gpio81", "gpio82";
+ function = "qup15";
+ };
+ };
+
+ qup_spi0_default: qup-spi0-default {
+ pinmux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "qup0";
+ };
+ };
+
+ qup_spi1_default: qup-spi1-default {
+ pinmux {
+ pins = "gpio17", "gpio18",
+ "gpio19", "gpio20";
+ function = "qup1";
+ };
+ };
+
+ qup_spi2_default: qup-spi2-default {
+ pinmux {
+ pins = "gpio27", "gpio28",
+ "gpio29", "gpio30";
+ function = "qup2";
+ };
+ };
+
+ qup_spi3_default: qup-spi3-default {
+ pinmux {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ function = "qup3";
+ };
+ };
+
+ qup_spi4_default: qup-spi4-default {
+ pinmux {
+ pins = "gpio89", "gpio90",
+ "gpio91", "gpio92";
+ function = "qup4";
+ };
+ };
+
+ qup_spi5_default: qup-spi5-default {
+ pinmux {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ function = "qup5";
+ };
+ };
+
+ qup_spi6_default: qup-spi6-default {
+ pinmux {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ function = "qup6";
+ };
+ };
+
+ qup_spi7_default: qup-spi7-default {
+ pinmux {
+ pins = "gpio93", "gpio94",
+ "gpio95", "gpio96";
+ function = "qup7";
+ };
+ };
+
+ qup_spi8_default: qup-spi8-default {
+ pinmux {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ function = "qup8";
+ };
+ };
+
+ qup_spi9_default: qup-spi9-default {
+ pinmux {
+ pins = "gpio6", "gpio7",
+ "gpio4", "gpio5";
+ function = "qup9";
+ };
+ };
+
+ qup_spi10_default: qup-spi10-default {
+ pinmux {
+ pins = "gpio55", "gpio56",
+ "gpio53", "gpio54";
+ function = "qup10";
+ };
+ };
+
+ qup_spi11_default: qup-spi11-default {
+ pinmux {
+ pins = "gpio31", "gpio32",
+ "gpio33", "gpio34";
+ function = "qup11";
+ };
+ };
+
+ qup_spi12_default: qup-spi12-default {
+ pinmux {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ function = "qup12";
+ };
+ };
+
+ qup_spi13_default: qup-spi13-default {
+ pinmux {
+ pins = "gpio105", "gpio106",
+ "gpio107", "gpio108";
+ function = "qup13";
+ };
+ };
+
+ qup_spi14_default: qup-spi14-default {
+ pinmux {
+ pins = "gpio33", "gpio34",
+ "gpio31", "gpio32";
+ function = "qup14";
+ };
+ };
+
+ qup_spi15_default: qup-spi15-default {
+ pinmux {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ function = "qup15";
+ };
+ };
+
+ qup_uart9_default: qup-uart9-default {
+ pinmux {
+ pins = "gpio4", "gpio5";
+ function = "qup9";
+ };
+ };
+
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET drive strengths use different
+ * values/steps than typical GPIO drive
+ * strengths.
+ *
+ * The following table clarifies this:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * The POR value for UFS_RESET HDRV is 3, i.e.
+ * 3.1 mA, and that is what we want. Hence
+ * specify 8 mA in the "drive-strength"
+ * binding, which results in writing 3 to the
+ * HDRV field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
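+ /*
+ * Per the qcom,spmi-pmic-arb binding, the four
+ * interrupt cells below encode the SPMI slave ID,
+ * peripheral ID, peripheral interrupt number and
+ * IRQ flags, in that order.
+ */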
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ ufshc: ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
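+ /*
+ * Per the ufshcd platform binding, freq-table-hz
+ * below lists one <min max> operating-frequency
+ * pair per entry in "clocks" above, in the same
+ * order.
+ */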
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ ufsphy: phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufsphy_0: lane@1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+ ufsphy_1: lane@1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+ };
+};
diff --git a/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/preimage b/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/preimage
new file mode 100644
index 0000000..d29bbda
--- /dev/null
+++ b/rr-cache/dda7250f4f5d0e7703ebbcda2aed73cea685cc09/preimage
@@ -0,0 +1,1256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ i2c9 = &i2c9;
+ i2c10 = &i2c10;
+ i2c11 = &i2c11;
+ i2c12 = &i2c12;
+ i2c13 = &i2c13;
+ i2c14 = &i2c14;
+ i2c15 = &i2c15;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ spi6 = &spi6;
+ spi7 = &spi7;
+ spi8 = &spi8;
+ spi9 = &spi9;
+ spi10 = &spi10;
+ spi11 = &spi11;
+ spi12 = &spi12;
+ spi13 = &spi13;
+ spi14 = &spi14;
+ spi15 = &spi15;
+ };
+
+ chosen { };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@85fc0000 {
+ reg = <0 0x85fc0000 0 0x20000>;
+ no-map;
+ };
+
+ memory@85fe0000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x85fe0000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ qupv3_id_0: geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x8c0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ i2c0: i2c@880000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x880000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c0_default>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi0: spi@880000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x880000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi0_default>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@884000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x884000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c1_default>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@884000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x884000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi1_default>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@888000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x888000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c2_default>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@888000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x888000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi2_default>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@88c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x88c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c3_default>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@88c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x88c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi3_default>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@890000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x890000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c4_default>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi4: spi@890000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x890000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi4_default>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@894000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x894000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi5: spi@894000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x894000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@898000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x898000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c6_default>;
+ interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi6: spi@898000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x898000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi6_default>;
+ interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@89c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x89c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c7_default>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi7: spi@89c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x89c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi7_default>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0xac0000 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ i2c8: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa80000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c8_default>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi8: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa80000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi8_default>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c9: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi9: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart9: serial@a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0xa84000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c10_default>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi10: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa88000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi10_default>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c11: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa8c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c11_default>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi11: spi@a8c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa8c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi11_default>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c12: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa90000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c12_default>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi12: spi@a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa90000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi12_default>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c13: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa94000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c13_default>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi13: spi@a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa94000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi13_default>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c14: i2c@a98000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa98000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c14_default>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi14: spi@a98000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa98000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi14_default>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c15: i2c@a9c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0xa9c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c15_default>;
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi15: spi@a9c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa9c000 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi15_default>;
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x1f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+ compatible = "qcom,sdm845-pinctrl";
+ reg = <0x03400000 0xc00000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+<<<<<<<
+ qup_i2c0_default: qup-i2c0-default {
+ pinmux {
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ };
+ };
+
+ qup_i2c1_default: qup-i2c1-default {
+ pinmux {
+ pins = "gpio17", "gpio18";
+ function = "qup1";
+ };
+ };
+
+ qup_i2c2_default: qup-i2c2-default {
+ pinmux {
+ pins = "gpio27", "gpio28";
+ function = "qup2";
+ };
+ };
+
+ qup_i2c3_default: qup-i2c3-default {
+ pinmux {
+ pins = "gpio41", "gpio42";
+ function = "qup3";
+ };
+ };
+
+ qup_i2c4_default: qup-i2c4-default {
+ pinmux {
+ pins = "gpio89", "gpio90";
+ function = "qup4";
+ };
+ };
+
+ qup_i2c5_default: qup-i2c5-default {
+ pinmux {
+ pins = "gpio85", "gpio86";
+ function = "qup5";
+ };
+ };
+
+ qup_i2c6_default: qup-i2c6-default {
+ pinmux {
+ pins = "gpio45", "gpio46";
+ function = "qup6";
+ };
+ };
+
+ qup_i2c7_default: qup-i2c7-default {
+ pinmux {
+ pins = "gpio93", "gpio94";
+ function = "qup7";
+ };
+ };
+
+ qup_i2c8_default: qup-i2c8-default {
+ pinmux {
+ pins = "gpio65", "gpio66";
+ function = "qup8";
+ };
+ };
+
+ qup_i2c9_default: qup-i2c9-default {
+ pinmux {
+ pins = "gpio6", "gpio7";
+ function = "qup9";
+ };
+ };
+
+ qup_i2c10_default: qup-i2c10-default {
+ pinmux {
+ pins = "gpio55", "gpio56";
+ function = "qup10";
+ };
+ };
+
+ qup_i2c11_default: qup-i2c11-default {
+ pinmux {
+ pins = "gpio31", "gpio32";
+ function = "qup11";
+ };
+ };
+
+ qup_i2c12_default: qup-i2c12-default {
+ pinmux {
+ pins = "gpio49", "gpio50";
+ function = "qup12";
+ };
+ };
+
+ qup_i2c13_default: qup-i2c13-default {
+ pinmux {
+ pins = "gpio105", "gpio106";
+ function = "qup13";
+ };
+ };
+
+ qup_i2c14_default: qup-i2c14-default {
+ pinmux {
+ pins = "gpio33", "gpio34";
+ function = "qup14";
+ };
+ };
+
+ qup_i2c15_default: qup-i2c15-default {
+ pinmux {
+ pins = "gpio81", "gpio82";
+ function = "qup15";
+ };
+ };
+
+ qup_spi0_default: qup-spi0-default {
+ pinmux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "qup0";
+ };
+ };
+
+ qup_spi1_default: qup-spi1-default {
+ pinmux {
+ pins = "gpio17", "gpio18",
+ "gpio19", "gpio20";
+ function = "qup1";
+ };
+ };
+
+ qup_spi2_default: qup-spi2-default {
+ pinmux {
+ pins = "gpio27", "gpio28",
+ "gpio29", "gpio30";
+ function = "qup2";
+ };
+ };
+
+ qup_spi3_default: qup-spi3-default {
+ pinmux {
+ pins = "gpio41", "gpio42",
+ "gpio43", "gpio44";
+ function = "qup3";
+ };
+ };
+
+ qup_spi4_default: qup-spi4-default {
+ pinmux {
+ pins = "gpio89", "gpio90",
+ "gpio91", "gpio92";
+ function = "qup4";
+ };
+ };
+
+ qup_spi5_default: qup-spi5-default {
+ pinmux {
+ pins = "gpio85", "gpio86",
+ "gpio87", "gpio88";
+ function = "qup5";
+ };
+ };
+
+ qup_spi6_default: qup-spi6-default {
+ pinmux {
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ function = "qup6";
+ };
+ };
+
+ qup_spi7_default: qup-spi7-default {
+ pinmux {
+ pins = "gpio93", "gpio94",
+ "gpio95", "gpio96";
+ function = "qup7";
+ };
+ };
+
+ qup_spi8_default: qup-spi8-default {
+ pinmux {
+ pins = "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ function = "qup8";
+ };
+ };
+
+ qup_spi9_default: qup-spi9-default {
+ pinmux {
+ pins = "gpio6", "gpio7",
+ "gpio4", "gpio5";
+ function = "qup9";
+ };
+ };
+
+ qup_spi10_default: qup-spi10-default {
+ pinmux {
+ pins = "gpio55", "gpio56",
+ "gpio53", "gpio54";
+ function = "qup10";
+ };
+ };
+
+ qup_spi11_default: qup-spi11-default {
+ pinmux {
+ pins = "gpio31", "gpio32",
+ "gpio33", "gpio34";
+ function = "qup11";
+ };
+ };
+
+ qup_spi12_default: qup-spi12-default {
+ pinmux {
+ pins = "gpio49", "gpio50",
+ "gpio51", "gpio52";
+ function = "qup12";
+ };
+ };
+
+ qup_spi13_default: qup-spi13-default {
+ pinmux {
+ pins = "gpio105", "gpio106",
+ "gpio107", "gpio108";
+ function = "qup13";
+ };
+ };
+
+ qup_spi14_default: qup-spi14-default {
+ pinmux {
+ pins = "gpio33", "gpio34",
+ "gpio31", "gpio32";
+ function = "qup14";
+ };
+ };
+
+ qup_spi15_default: qup-spi15-default {
+ pinmux {
+ pins = "gpio81", "gpio82",
+ "gpio83", "gpio84";
+ function = "qup15";
+ };
+ };
+
+ qup_uart9_default: qup-uart9-default {
+ pinmux {
+ pins = "gpio4", "gpio5";
+ function = "qup9";
+ };
+=======
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET drive strengths use different
+ * values/steps than typical GPIO drive
+ * strengths.
+ *
+ * The following table clarifies this:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * The POR value for UFS_RESET HDRV is 3, i.e.
+ * 3.1 mA, and that is what we want. Hence
+ * specify 8 mA in the "drive-strength"
+ * binding, which results in writing 3 to the
+ * HDRV field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+>>>>>>>
+ };
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0x1100>,
+ <0xc600000 0x2000000>,
+ <0xe600000 0x100000>,
+ <0xe700000 0xa0000>,
+ <0xc40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ apss_shared: mailbox@17990000 {
+ compatible = "qcom,sdm845-apss-shared";
+ reg = <0x17990000 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ apps_rsc: rsc@179e0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179e0000 0xd00>, <0x179e0d00 0x3000>;
+ reg-names = "drv", "tcs";
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <ACTIVE_TCS 2>,
+ <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,rpmh-clk-sdm845";
+ #clock-cells = <1>;
+ };
+
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ pm8998_s1: smps1 {};
+ pm8998_s2: smps2 {};
+ pm8998_s3: smps3 {};
+ pm8998_s4: smps4 {};
+ pm8998_s5: smps5 {};
+ pm8998_s6: smps6 {};
+ pm8998_s7: smps7 {};
+ pm8998_s9: smps9 {};
+ pm8998_l1: ldo1 {};
+ pm8998_l2: ldo2 {};
+ pm8998_l3: ldo3 {};
+ pm8998_l4: ldo4 {};
+ pm8998_l5: ldo5 {};
+ pm8998_l6: ldo6 {};
+ pm8998_l7: ldo7 {};
+ pm8998_l8: ldo8 {};
+ pm8998_l9: ldo9 {};
+ pm8998_l10: ldo10 {};
+ pm8998_l11: ldo11 {};
+ pm8998_l12: ldo12 {};
+ pm8998_l13: ldo13 {};
+ pm8998_l14: ldo14 {};
+ pm8998_l15: ldo15 {};
+ pm8998_l16: ldo16 {};
+ pm8998_l17: ldo17 {};
+ pm8998_l18: ldo18 {};
+ pm8998_l19: ldo19 {};
+ pm8998_l20: ldo20 {};
+ pm8998_l21: ldo21 {};
+ pm8998_l22: ldo22 {};
+ pm8998_l23: ldo23 {};
+ pm8998_l24: ldo24 {};
+ pm8998_l25: ldo25 {};
+ pm8998_l26: ldo26 {};
+ pm8998_l27: ldo27 {};
+ pm8998_l28: ldo28 {};
+ pm8998_lvs1: lvs1 {};
+ pm8998_lvs2: lvs2 {};
+
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pmi8998_bob: bob {};
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x17a00000 0x10000>, /* GICD */
+ <0x17a60000 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x17a40000 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ timer@17c90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17c90000 0x1000>;
+
+ frame@17ca0000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ca0000 0x1000>,
+ <0x17cb0000 0x1000>;
+ };
+
+ frame@17cc0000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cc0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cd0000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cd0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17ce0000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17ce0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17cf0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17cf0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d00000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d00000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17d10000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17d10000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ ufshc: ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_0>, <&ufsphy_1>;
+ phy-names = "ufsphy_0", "ufsphy_1";
+
+ lanes-per-direction = <2>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ non-removable;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ ufsphy: phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0x1d87000 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufsphy_0: lane@1d87400 {
+ reg = <0x1d87400 0x108>,
+ <0x1d87600 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+
+ ufsphy_1: lane@1d87800 {
+ reg = <0x1d87800 0x108>,
+ <0x1d87a00 0x1e0>,
+ <0x1d87c00 0x1dc>;
+ #phy-cells = <0>;
+ };
+ };
+ };
+};
diff --git a/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/postimage b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/postimage
new file mode 100644
index 0000000..5002dd0
--- /dev/null
+++ b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/postimage
@@ -0,0 +1,1303 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+
+#define Q6AFE_TDM_PB_DAI(pre, num, did) { \
+ .playback = { \
+ .stream_name = pre" TDM"#num" Playback", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
+
+#define Q6AFE_TDM_CAP_DAI(pre, num, did) { \
+ .capture = { \
+ .stream_name = pre" TDM"#num" Capture", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
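+/*
+ * Illustrative use of the macros above (assumed, not taken from this
+ * snapshot): an instantiation such as
+ *	Q6AFE_TDM_PB_DAI("Primary", 0, PRIMARY_TDM_RX_0),
+ * expands to a dai with .name = "PRIMARY_TDM_RX_0", .id = PRIMARY_TDM_RX_0
+ * and stream name "Primary TDM0 Playback", bound to q6tdm_ops.
+ */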
+
+struct q6afe_dai_priv_data {
+ uint32_t sd_line_mask;
+ uint32_t sync_mode;
+ uint32_t sync_src;
+ uint32_t data_out_enable;
+ uint32_t invert_sync;
+ uint32_t data_delay;
+ uint32_t data_align;
+};
+
+struct q6afe_dai_data {
+ struct q6afe_port *port[AFE_PORT_MAX];
+ struct q6afe_port_config port_config[AFE_PORT_MAX];
+ bool is_port_started[AFE_PORT_MAX];
+ struct q6afe_dai_priv_data priv[AFE_PORT_MAX];
+};
+
+static int q6slim_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_slim_cfg *slim = &dai_data->port_config[dai->id].slim;
+
+ slim->num_channels = params_channels(params);
+ slim->sample_rate = params_rate(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ case SNDRV_PCM_FORMAT_SPECIAL:
+ slim->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ slim->bit_width = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ slim->bit_width = 32;
+ break;
+ default:
+ pr_err("%s: format %d\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6hdmi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int channels = params_channels(params);
+ struct q6afe_hdmi_cfg *hdmi = &dai_data->port_config[dai->id].hdmi;
+
+ hdmi->sample_rate = params_rate(params);
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ hdmi->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ hdmi->bit_width = 24;
+ break;
+ }
+
+ /* HDMI spec CEA-861-E: Table 28 Audio InfoFrame Data Byte 4 */
+ switch (channels) {
+ case 2:
+ hdmi->channel_allocation = 0;
+ break;
+ case 3:
+ hdmi->channel_allocation = 0x02;
+ break;
+ case 4:
+ hdmi->channel_allocation = 0x06;
+ break;
+ case 5:
+ hdmi->channel_allocation = 0x0A;
+ break;
+ case 6:
+ hdmi->channel_allocation = 0x0B;
+ break;
+ case 7:
+ hdmi->channel_allocation = 0x12;
+ break;
+ case 8:
+ hdmi->channel_allocation = 0x13;
+ break;
+ default:
+ dev_err(dai->dev, "invalid Channels = %u\n", channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->sample_rate = params_rate(params);
+ i2s->bit_width = params_width(params);
+ i2s->num_channels = params_channels(params);
+ i2s->sd_line_mask = dai_data->priv[dai->id].sd_line_mask;
+
+ return 0;
+}
+
+static int q6i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->fmt = fmt;
+
+ return 0;
+}
+
+static int q6tdm_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ unsigned int cap_mask;
+ int rc = 0;
+
+ /* HW only supports 16 and 32 bit slot width configuration */
+ if ((slot_width != 16) && (slot_width != 32)) {
+ dev_err(dai->dev, "%s: invalid slot_width %d\n",
+ __func__, slot_width);
+ return -EINVAL;
+ }
+
+ /* HW supports 1-32 slots configuration. Typical: 1, 2, 4, 8, 16, 32 */
+ switch (slots) {
+ case 2:
+ cap_mask = 0x03;
+ break;
+ case 4:
+ cap_mask = 0x0F;
+ break;
+ case 8:
+ cap_mask = 0xFF;
+ break;
+ case 16:
+ cap_mask = 0xFFFF;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid slots %d\n",
+ __func__, slots);
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ tdm->nslots_per_frame = slots;
+ tdm->slot_width = slot_width;
+ /* TDM RX dai ids are even and TX ones are odd */
+ tdm->slot_mask = (dai->id & 0x1 ? tx_mask : rx_mask) & cap_mask;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
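+/*
+ * Worked example for q6tdm_set_tdm_slot() (illustrative only): slots = 8
+ * and slot_width = 32 give cap_mask = 0xff; on an RX dai (even dai->id)
+ * with rx_mask = 0x0f this stores tdm->slot_mask = 0x0f,
+ * nslots_per_frame = 8 and slot_width = 32.
+ */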
+
+static int q6tdm_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ int rc = 0;
+ int i = 0;
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ if (dai->id & 0x1) {
+ if (!tx_slot) {
+ dev_err(dai->dev, "tx slot not found\n");
+ return -EINVAL;
+ }
+ if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid tx num %d\n",
+ tx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tx_num; i++)
+ tdm->ch_mapping[i] = tx_slot[i];
+
+ for (i = tx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = tx_num;
+ } else {
+ /* rx */
+ if (!rx_slot) {
+ dev_err(dai->dev, "rx slot not found\n");
+ return -EINVAL;
+ }
+ if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid rx num %d\n",
+ rx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++)
+ tdm->ch_mapping[i] = rx_slot[i];
+
+ for (i = rx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = rx_num;
+ }
+
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int q6tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+
+ tdm->bit_width = params_width(params);
+ tdm->sample_rate = params_rate(params);
+ tdm->num_channels = params_channels(params);
+ tdm->data_align_type = dai_data->priv[dai->id].data_align;
+ tdm->sync_src = dai_data->priv[dai->id].sync_src;
+ tdm->sync_mode = dai_data->priv[dai->id].sync_mode;
+
+ return 0;
+}
+
+static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+
+ dai_data->is_port_started[dai->id] = false;
+
+}
+
+static int q6afe_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+ if (dai_data->is_port_started[dai->id]) {
+ /* stop the port and restart with new port config */
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+ return rc;
+ }
+ }
+
+ switch (dai->id) {
+ case HDMI_RX:
+ q6afe_hdmi_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].hdmi);
+ break;
+ case SLIMBUS_0_RX ... SLIMBUS_6_TX:
+ q6afe_slim_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].slim);
+ break;
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ rc = q6afe_i2s_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].i2s_cfg);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to prepare AFE port %x\n",
+ dai->id);
+ return rc;
+ }
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ q6afe_tdm_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].tdm);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = q6afe_port_start(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start AFE port %x\n", dai->id);
+ return rc;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
+}
+
+static int q6slim_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_port_config *pcfg = &dai_data->port_config[dai->id];
+ int i;
+
+ if (!rx_slot) {
+ pr_err("%s: rx slot not found\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++) {
+ pcfg->slim.ch_mapping[i] = rx_slot[i];
+ pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, rx_slot[i]);
+ }
+
+ pcfg->slim.num_channels = rx_num;
+
+ pr_debug("%s: SLIMBUS_%d_RX cnt[%d] ch[%d %d]\n", __func__,
+ (dai->id - SLIMBUS_0_RX) / 2, rx_num,
+ pcfg->slim.ch_mapping[0],
+ pcfg->slim.ch_mapping[1]);
+
+ return 0;
+}
+
+static int q6afe_mi2s_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_port *port = dai_data->port[dai->id];
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ return q6afe_port_set_sysclk(port, clk_id, 0, 5, freq, dir);
+ case LPAIF_BIT_CLK:
+ case LPAIF_OSR_CLK:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_SRC_INTERNAL,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
+ {"HDMI Playback", NULL, "HDMI_RX"},
+ {"Slimbus1 Playback", NULL, "SLIMBUS_1_RX"},
+ {"Slimbus2 Playback", NULL, "SLIMBUS_2_RX"},
+ {"Slimbus3 Playback", NULL, "SLIMBUS_3_RX"},
+ {"Slimbus4 Playback", NULL, "SLIMBUS_4_RX"},
+ {"Slimbus5 Playback", NULL, "SLIMBUS_5_RX"},
+ {"Slimbus6 Playback", NULL, "SLIMBUS_6_RX"},
+
+ {"Primary MI2S Playback", NULL, "PRI_MI2S_RX"},
+ {"Secondary MI2S Playback", NULL, "SEC_MI2S_RX"},
+ {"Tertiary MI2S Playback", NULL, "TERT_MI2S_RX"},
+ {"Quaternary MI2S Playback", NULL, "QUAT_MI2S_RX"},
+
+ {"Primary TDM0 Playback", NULL, "PRIMARY_TDM_RX_0"},
+ {"Primary TDM1 Playback", NULL, "PRIMARY_TDM_RX_1"},
+ {"Primary TDM2 Playback", NULL, "PRIMARY_TDM_RX_2"},
+ {"Primary TDM3 Playback", NULL, "PRIMARY_TDM_RX_3"},
+ {"Primary TDM4 Playback", NULL, "PRIMARY_TDM_RX_4"},
+ {"Primary TDM5 Playback", NULL, "PRIMARY_TDM_RX_5"},
+ {"Primary TDM6 Playback", NULL, "PRIMARY_TDM_RX_6"},
+ {"Primary TDM7 Playback", NULL, "PRIMARY_TDM_RX_7"},
+
+ {"Secondary TDM0 Playback", NULL, "SEC_TDM_RX_0"},
+ {"Secondary TDM1 Playback", NULL, "SEC_TDM_RX_1"},
+ {"Secondary TDM2 Playback", NULL, "SEC_TDM_RX_2"},
+ {"Secondary TDM3 Playback", NULL, "SEC_TDM_RX_3"},
+ {"Secondary TDM4 Playback", NULL, "SEC_TDM_RX_4"},
+ {"Secondary TDM5 Playback", NULL, "SEC_TDM_RX_5"},
+ {"Secondary TDM6 Playback", NULL, "SEC_TDM_RX_6"},
+ {"Secondary TDM7 Playback", NULL, "SEC_TDM_RX_7"},
+
+ {"Tertiary TDM0 Playback", NULL, "TERT_TDM_RX_0"},
+ {"Tertiary TDM1 Playback", NULL, "TERT_TDM_RX_1"},
+ {"Tertiary TDM2 Playback", NULL, "TERT_TDM_RX_2"},
+ {"Tertiary TDM3 Playback", NULL, "TERT_TDM_RX_3"},
+ {"Tertiary TDM4 Playback", NULL, "TERT_TDM_RX_4"},
+ {"Tertiary TDM5 Playback", NULL, "TERT_TDM_RX_5"},
+ {"Tertiary TDM6 Playback", NULL, "TERT_TDM_RX_6"},
+ {"Tertiary TDM7 Playback", NULL, "TERT_TDM_RX_7"},
+
+ {"Quaternary TDM0 Playback", NULL, "QUAT_TDM_RX_0"},
+ {"Quaternary TDM1 Playback", NULL, "QUAT_TDM_RX_1"},
+ {"Quaternary TDM2 Playback", NULL, "QUAT_TDM_RX_2"},
+ {"Quaternary TDM3 Playback", NULL, "QUAT_TDM_RX_3"},
+ {"Quaternary TDM4 Playback", NULL, "QUAT_TDM_RX_4"},
+ {"Quaternary TDM5 Playback", NULL, "QUAT_TDM_RX_5"},
+ {"Quaternary TDM6 Playback", NULL, "QUAT_TDM_RX_6"},
+ {"Quaternary TDM7 Playback", NULL, "QUAT_TDM_RX_7"},
+
+ {"Quinary TDM0 Playback", NULL, "QUIN_TDM_RX_0"},
+ {"Quinary TDM1 Playback", NULL, "QUIN_TDM_RX_1"},
+ {"Quinary TDM2 Playback", NULL, "QUIN_TDM_RX_2"},
+ {"Quinary TDM3 Playback", NULL, "QUIN_TDM_RX_3"},
+ {"Quinary TDM4 Playback", NULL, "QUIN_TDM_RX_4"},
+ {"Quinary TDM5 Playback", NULL, "QUIN_TDM_RX_5"},
+ {"Quinary TDM6 Playback", NULL, "QUIN_TDM_RX_6"},
+ {"Quinary TDM7 Playback", NULL, "QUIN_TDM_RX_7"},
+
+ {"PRIMARY_TDM_TX_0", NULL, "Primary TDM0 Capture"},
+ {"PRIMARY_TDM_TX_1", NULL, "Primary TDM1 Capture"},
+ {"PRIMARY_TDM_TX_2", NULL, "Primary TDM2 Capture"},
+ {"PRIMARY_TDM_TX_3", NULL, "Primary TDM3 Capture"},
+ {"PRIMARY_TDM_TX_4", NULL, "Primary TDM4 Capture"},
+ {"PRIMARY_TDM_TX_5", NULL, "Primary TDM5 Capture"},
+ {"PRIMARY_TDM_TX_6", NULL, "Primary TDM6 Capture"},
+ {"PRIMARY_TDM_TX_7", NULL, "Primary TDM7 Capture"},
+
+ {"SEC_TDM_TX_0", NULL, "Secondary TDM0 Capture"},
+ {"SEC_TDM_TX_1", NULL, "Secondary TDM1 Capture"},
+ {"SEC_TDM_TX_2", NULL, "Secondary TDM2 Capture"},
+ {"SEC_TDM_TX_3", NULL, "Secondary TDM3 Capture"},
+ {"SEC_TDM_TX_4", NULL, "Secondary TDM4 Capture"},
+ {"SEC_TDM_TX_5", NULL, "Secondary TDM5 Capture"},
+ {"SEC_TDM_TX_6", NULL, "Secondary TDM6 Capture"},
+ {"SEC_TDM_TX_7", NULL, "Secondary TDM7 Capture"},
+
+ {"TERT_TDM_TX_0", NULL, "Tertiary TDM0 Capture"},
+ {"TERT_TDM_TX_1", NULL, "Tertiary TDM1 Capture"},
+ {"TERT_TDM_TX_2", NULL, "Tertiary TDM2 Capture"},
+ {"TERT_TDM_TX_3", NULL, "Tertiary TDM3 Capture"},
+ {"TERT_TDM_TX_4", NULL, "Tertiary TDM4 Capture"},
+ {"TERT_TDM_TX_5", NULL, "Tertiary TDM5 Capture"},
+ {"TERT_TDM_TX_6", NULL, "Tertiary TDM6 Capture"},
+ {"TERT_TDM_TX_7", NULL, "Tertiary TDM7 Capture"},
+
+ {"QUAT_TDM_TX_0", NULL, "Quaternary TDM0 Capture"},
+ {"QUAT_TDM_TX_1", NULL, "Quaternary TDM1 Capture"},
+ {"QUAT_TDM_TX_2", NULL, "Quaternary TDM2 Capture"},
+ {"QUAT_TDM_TX_3", NULL, "Quaternary TDM3 Capture"},
+ {"QUAT_TDM_TX_4", NULL, "Quaternary TDM4 Capture"},
+ {"QUAT_TDM_TX_5", NULL, "Quaternary TDM5 Capture"},
+ {"QUAT_TDM_TX_6", NULL, "Quaternary TDM6 Capture"},
+ {"QUAT_TDM_TX_7", NULL, "Quaternary TDM7 Capture"},
+
+ {"QUIN_TDM_TX_0", NULL, "Quinary TDM0 Capture"},
+ {"QUIN_TDM_TX_1", NULL, "Quinary TDM1 Capture"},
+ {"QUIN_TDM_TX_2", NULL, "Quinary TDM2 Capture"},
+ {"QUIN_TDM_TX_3", NULL, "Quinary TDM3 Capture"},
+ {"QUIN_TDM_TX_4", NULL, "Quinary TDM4 Capture"},
+ {"QUIN_TDM_TX_5", NULL, "Quinary TDM5 Capture"},
+ {"QUIN_TDM_TX_6", NULL, "Quinary TDM6 Capture"},
+ {"QUIN_TDM_TX_7", NULL, "Quinary TDM7 Capture"},
+
+ {"TERT_MI2S_TX", NULL, "Tertiary MI2S Capture"},
+ {"PRI_MI2S_TX", NULL, "Primary MI2S Capture"},
+ {"SEC_MI2S_TX", NULL, "Secondary MI2S Capture"},
+ {"QUAT_MI2S_TX", NULL, "Quaternary MI2S Capture"},
+};
+
+static struct snd_soc_dai_ops q6hdmi_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6hdmi_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+};
+
+static struct snd_soc_dai_ops q6i2s_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6i2s_hw_params,
+ .set_fmt = q6i2s_set_fmt,
+ .shutdown = q6afe_dai_shutdown,
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+};
+
+static struct snd_soc_dai_ops q6slim_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6slim_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+ .set_channel_map = q6slim_set_channel_map,
+};
+
+static struct snd_soc_dai_ops q6tdm_ops = {
+ .prepare = q6afe_dai_prepare,
+ .shutdown = q6afe_dai_shutdown,
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+ .set_tdm_slot = q6tdm_set_tdm_slot,
+ .set_channel_map = q6tdm_set_channel_map,
+ .hw_params = q6tdm_hw_params,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_port *port;
+
+ port = q6afe_port_get_from_id(dai->dev, dai->id);
+ if (IS_ERR(port)) {
+ dev_err(dai->dev, "Unable to get afe port\n");
+ return -EINVAL;
+ }
+ dai_data->port[dai->id] = port;
+
+ return 0;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ q6afe_port_put(dai_data->port[dai->id]);
+ dai_data->port[dai->id] = NULL;
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver q6afe_dais[] = {
+ {
+ .playback = {
+ .stream_name = "HDMI Playback",
+ .rates = SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_max = 192000,
+ .rate_min = 48000,
+ },
+ .ops = &q6hdmi_ops,
+ .id = HDMI_RX,
+ .name = "HDMI",
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .name = "SLIMBUS_0_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_0_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .playback = {
+ .stream_name = "Slimbus Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ }, {
+ .playback = {
+ .stream_name = "Slimbus1 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_1_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_1_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus2 Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_2_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_2_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus3 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_3_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_3_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus4 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_4_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_4_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus5 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_5_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_5_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus6 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &q6slim_ops,
+ .name = "SLIMBUS_6_RX",
+ .id = SLIMBUS_6_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Primary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_RX,
+ .name = "PRI_MI2S_RX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Primary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_TX,
+ .name = "PRI_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Secondary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "SEC_MI2S_RX",
+ .id = SECONDARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Secondary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = SECONDARY_MI2S_TX,
+ .name = "SEC_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Tertiary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "TERT_MI2S_RX",
+ .id = TERTIARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Tertiary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = TERTIARY_MI2S_TX,
+ .name = "TERT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Quaternary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "QUAT_MI2S_RX",
+ .id = QUATERNARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Quaternary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = QUATERNARY_MI2S_TX,
+ .name = "QUAT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ },
+ Q6AFE_TDM_PB_DAI("Primary", 0, PRIMARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Primary", 1, PRIMARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Primary", 2, PRIMARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Primary", 3, PRIMARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Primary", 4, PRIMARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Primary", 5, PRIMARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Primary", 6, PRIMARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Primary", 7, PRIMARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Primary", 0, PRIMARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Primary", 1, PRIMARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Primary", 2, PRIMARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Primary", 3, PRIMARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Primary", 4, PRIMARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Primary", 5, PRIMARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Primary", 6, PRIMARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Primary", 7, PRIMARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Secondary", 0, SECONDARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Secondary", 1, SECONDARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Secondary", 2, SECONDARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Secondary", 3, SECONDARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Secondary", 4, SECONDARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Secondary", 5, SECONDARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Secondary", 6, SECONDARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Secondary", 7, SECONDARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Secondary", 0, SECONDARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Secondary", 1, SECONDARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Secondary", 2, SECONDARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Secondary", 3, SECONDARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Secondary", 4, SECONDARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Secondary", 5, SECONDARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Secondary", 6, SECONDARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Secondary", 7, SECONDARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Tertiary", 0, TERTIARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Tertiary", 1, TERTIARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Tertiary", 2, TERTIARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Tertiary", 3, TERTIARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Tertiary", 4, TERTIARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Tertiary", 5, TERTIARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Tertiary", 6, TERTIARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Tertiary", 7, TERTIARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 0, TERTIARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 1, TERTIARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 2, TERTIARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 3, TERTIARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 4, TERTIARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 5, TERTIARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 6, TERTIARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 7, TERTIARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quaternary", 0, QUATERNARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quaternary", 1, QUATERNARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quaternary", 2, QUATERNARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quaternary", 3, QUATERNARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quaternary", 4, QUATERNARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quaternary", 5, QUATERNARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quaternary", 6, QUATERNARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quaternary", 7, QUATERNARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 0, QUATERNARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 1, QUATERNARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 2, QUATERNARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 3, QUATERNARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 4, QUATERNARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 5, QUATERNARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 6, QUATERNARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 7, QUATERNARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quinary", 0, QUINARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quinary", 1, QUINARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quinary", 2, QUINARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quinary", 3, QUINARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quinary", 4, QUINARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quinary", 5, QUINARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quinary", 6, QUINARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quinary", 7, QUINARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quinary", 0, QUINARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quinary", 1, QUINARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quinary", 2, QUINARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quinary", 3, QUINARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quinary", 4, QUINARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quinary", 5, QUINARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quinary", 6, QUINARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quinary", 7, QUINARY_TDM_TX_7),
+};
+
+static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ int id = args->args[0];
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(q6afe_dais); i++) {
+ if (q6afe_dais[i].id == id) {
+ *dai_name = q6afe_dais[i].name;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
+ SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+ "Secondary MI2S Playback SD1",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+ 0, 0, 0, 0),
+};
+
+static const struct snd_soc_component_driver q6afe_dai_component = {
+ .name = "q6afe-dai-component",
+ .dapm_widgets = q6afe_dai_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(q6afe_dai_widgets),
+ .dapm_routes = q6afe_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(q6afe_dapm_routes),
+ .of_xlate_dai_name = q6afe_of_xlate_dai_name,
+
+};
+
+static void of_q6afe_parse_dai_data(struct device *dev,
+ struct q6afe_dai_data *data)
+{
+ struct device_node *node;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ unsigned int lines[Q6AFE_MAX_MI2S_LINES];
+ struct q6afe_dai_priv_data *priv;
+ int id, i, num_lines;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id > AFE_PORT_MAX) {
+ dev_err(dev, "valid dai id not found:%d\n", ret);
+ continue;
+ }
+
+ switch (id) {
+ /* MI2S specific properties */
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ priv = &data->priv[id];
+ ret = of_property_read_variable_u32_array(node,
+ "qcom,sd-lines",
+ lines, 0,
+ Q6AFE_MAX_MI2S_LINES);
+ if (ret < 0)
+ num_lines = 0;
+ else
+ num_lines = ret;
+
+ priv->sd_line_mask = 0;
+
+ for (i = 0; i < num_lines; i++)
+ priv->sd_line_mask |= BIT(lines[i]);
+
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ priv = &data->priv[id];
+ ret = of_property_read_u32(node, "qcom,tdm-sync-mode",
+ &priv->sync_mode);
+ if (ret) {
+ dev_err(dev, "No Sync mode from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-sync-src",
+ &priv->sync_src);
+ if (ret) {
+ dev_err(dev, "No Sync Src from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-out",
+ &priv->data_out_enable);
+ if (ret) {
+ dev_err(dev, "No Data out enable from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-invert-sync",
+ &priv->invert_sync);
+ if (ret) {
+ dev_err(dev, "No Invert sync from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-delay",
+ &priv->data_delay);
+ if (ret) {
+ dev_err(dev, "No Data Delay from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-align",
+ &priv->data_align);
+ if (ret) {
+ dev_err(dev, "No Data align from DT\n");
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int q6afe_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct q6afe_dai_data *dai_data;
+
+ dai_data = kzalloc(sizeof(*dai_data), GFP_KERNEL);
+ if (!dai_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dai_data);
+
+ of_q6afe_parse_dai_data(dev, dai_data);
+
+ return snd_soc_register_component(dev, &q6afe_dai_component,
+ q6afe_dais, ARRAY_SIZE(q6afe_dais));
+}
+
+static void q6afe_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+ kfree(dai_data);
+}
+
+static const struct component_ops q6afe_dai_comp_ops = {
+ .bind = q6afe_dai_bind,
+ .unbind = q6afe_dai_unbind,
+};
+
+static int q6afe_dai_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6afe_dai_comp_ops);
+}
+
+static int q6afe_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6afe_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6afe_dai_platform_driver = {
+ .driver = {
+ .name = "q6afe-dai",
+ },
+ .probe = q6afe_dai_dev_probe,
+ .remove = q6afe_dai_dev_remove,
+};
+module_platform_driver(q6afe_dai_platform_driver);
+
+MODULE_DESCRIPTION("Q6 Audio Fronend dai driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/preimage b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/preimage
new file mode 100644
index 0000000..b966e8e
--- /dev/null
+++ b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/preimage
@@ -0,0 +1,1638 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/component.h>
+>>>>>>>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+
+<<<<<<<
+=======
+#define Q6AFE_TDM_PB_DAI(pre, num, did) { \
+ .playback = { \
+ .stream_name = pre" TDM"#num" Playback", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
+
+#define Q6AFE_TDM_CAP_DAI(pre, num, did) { \
+ .capture = { \
+ .stream_name = pre" TDM"#num" Capture", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
+
+struct q6afe_dai_priv_data {
+ uint32_t sd_line_mask;
+ uint32_t sync_mode;
+ uint32_t sync_src;
+ uint32_t data_out_enable;
+ uint32_t invert_sync;
+ uint32_t data_delay;
+ uint32_t data_align;
+};
+
+>>>>>>>
+struct q6afe_dai_data {
+ struct q6afe_port *port[AFE_PORT_MAX];
+ struct q6afe_port_config port_config[AFE_PORT_MAX];
+ bool is_port_started[AFE_PORT_MAX];
+<<<<<<<
+ struct q6afe_dai_priv_data priv[AFE_PORT_MAX];
+=======
+};
+
+static int q6hdmi_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct q6afe_dai_data *dai_data = kcontrol->private_data;
+ int value = ucontrol->value.integer.value[0];
+
+ dai_data->port_config[AFE_PORT_HDMI_RX].hdmi.datatype = value;
+
+ return 0;
+}
+
+static int q6hdmi_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ struct q6afe_dai_data *dai_data = kcontrol->private_data;
+
+ ucontrol->value.integer.value[0] =
+ dai_data->port_config[AFE_PORT_HDMI_RX].hdmi.datatype;
+
+ return 0;
+}
+
+static const char * const hdmi_format[] = {
+ "LPCM",
+ "Compr"
+};
+
+static const struct soc_enum hdmi_config_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, hdmi_format),
+};
+
+static const struct snd_kcontrol_new q6afe_config_controls[] = {
+ SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
+ q6hdmi_format_get,
+ q6hdmi_format_put),
+>>>>>>>
+};
+
+static int q6slim_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_slim_cfg *slim = &dai_data->port_config[dai->id].slim;
+
+ slim->num_channels = params_channels(params);
+ slim->sample_rate = params_rate(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ case SNDRV_PCM_FORMAT_SPECIAL:
+ slim->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ slim->bit_width = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ slim->bit_width = 32;
+ break;
+ default:
+ pr_err("%s: format %d\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6hdmi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ int channels = params_channels(params);
+ struct q6afe_hdmi_cfg *hdmi = &dai_data->port_config[dai->id].hdmi;
+
+ hdmi->sample_rate = params_rate(params);
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ hdmi->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ hdmi->bit_width = 24;
+ break;
+ }
+
+<<<<<<<
+ /* HDMI spec CEA-861-E: Table 28 Audio InfoFrame Data Byte 4 */
+=======
+ /*refer to HDMI spec CEA-861-E: Table 28 Audio InfoFrame Data Byte 4*/
+>>>>>>>
+ switch (channels) {
+ case 2:
+ hdmi->channel_allocation = 0;
+ break;
+ case 3:
+ hdmi->channel_allocation = 0x02;
+ break;
+ case 4:
+ hdmi->channel_allocation = 0x06;
+ break;
+ case 5:
+ hdmi->channel_allocation = 0x0A;
+ break;
+ case 6:
+ hdmi->channel_allocation = 0x0B;
+ break;
+ case 7:
+ hdmi->channel_allocation = 0x12;
+ break;
+ case 8:
+ hdmi->channel_allocation = 0x13;
+ break;
+ default:
+ dev_err(dai->dev, "invalid Channels = %u\n", channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->sample_rate = params_rate(params);
+ i2s->bit_width = params_width(params);
+ i2s->num_channels = params_channels(params);
+ i2s->sd_line_mask = dai_data->priv[dai->id].sd_line_mask;
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+
+ i2s->sample_rate = params_rate(params);
+ i2s->bit_width = params_width(params);
+ i2s->num_channels = params_channels(params);
+>>>>>>>
+
+ return 0;
+}
+
+static int q6i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->fmt = fmt;
+
+ return 0;
+}
+
+<<<<<<<
+static int q6afe_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+
+ dai_data->is_port_started[dai->id] = false;
+
+ return 0;
+}
+
+static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ int rc;
+
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close AFE port\n");
+
+ dai_data->is_port_started[dai->id] = false;
+
+}
+
+static int q6afe_mi2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ int rc;
+
+ if (dai_data->is_port_started[dai->id]) {
+ /* stop the port and restart with new port config */
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to close AFE port\n");
+ return rc;
+ }
+ }
+
+ q6afe_i2s_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].i2s_cfg);
+
+ rc = q6afe_port_start(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start AFE port %x\n", dai->id);
+ return rc;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
+=======
+static int q6tdm_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ unsigned int cap_mask;
+ int rc = 0;
+
+ /* HW only supports 16 and 32 bit slot width configuration */
+ if ((slot_width != 16) && (slot_width != 32)) {
+ dev_err(dai->dev, "%s: invalid slot_width %d\n",
+ __func__, slot_width);
+ return -EINVAL;
+ }
+
+ /* HW supports 1-32 slots configuration. Typical: 1, 2, 4, 8, 16, 32 */
+ switch (slots) {
+ case 2:
+ cap_mask = 0x03;
+ break;
+ case 4:
+ cap_mask = 0x0F;
+ break;
+ case 8:
+ cap_mask = 0xFF;
+ break;
+ case 16:
+ cap_mask = 0xFFFF;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid slots %d\n",
+ __func__, slots);
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ tdm->nslots_per_frame = slots;
+ tdm->slot_width = slot_width;
+ /* TDM RX dais ids are even and tx are odd */
+ tdm->slot_mask = (dai->id & 0x1 ? tx_mask : rx_mask) & cap_mask;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int q6tdm_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ int rc = 0;
+ int i = 0;
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ if (dai->id & 0x1) {
+ if (!tx_slot) {
+ dev_err(dai->dev, "tx slot not found\n");
+ return -EINVAL;
+ }
+ if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid tx num %d\n",
+ tx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tx_num; i++)
+ tdm->ch_mapping[i] = tx_slot[i];
+
+ for (i = tx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = tx_num;
+ } else {
+ /* rx */
+ if (!rx_slot) {
+ dev_err(dai->dev, "rx slot not found\n");
+ return -EINVAL;
+ }
+ if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid rx num %d\n",
+ rx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++)
+ tdm->ch_mapping[i] = rx_slot[i];
+
+ for (i = rx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = rx_num;
+ }
+
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int q6tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+
+ tdm->bit_width = params_width(params);
+ tdm->sample_rate = params_rate(params);
+ tdm->num_channels = params_channels(params);
+ tdm->data_align_type = dai_data->priv[dai->id].data_align;
+ tdm->sync_src = dai_data->priv[dai->id].sync_src;
+ tdm->sync_mode = dai_data->priv[dai->id].sync_mode;
+
+ return 0;
+}
+static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+
+ dai_data->is_port_started[dai->id] = false;
+
+>>>>>>>
+}
+
+static int q6afe_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ int rc;
+
+ if (dai_data->is_port_started[dai->id]) {
+ /* stop the port and restart with new port config */
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0) {
+<<<<<<<
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+=======
+ dev_err(dai->dev, "fail to close AFE port\n");
+>>>>>>>
+ return rc;
+ }
+ }
+
+<<<<<<<
+ if (dai->id == AFE_PORT_HDMI_RX)
+ q6afe_hdmi_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].hdmi);
+ else if (dai->id >= SLIMBUS_0_RX && dai->id <= SLIMBUS_6_TX)
+ q6afe_slim_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].slim);
+=======
+ switch (dai->id) {
+ case HDMI_RX:
+ q6afe_hdmi_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].hdmi);
+ break;
+ case SLIMBUS_0_RX ... SLIMBUS_6_TX:
+ q6afe_slim_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].slim);
+ break;
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ rc = q6afe_i2s_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].i2s_cfg);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to prepare AFE port %x\n",
+ dai->id);
+ return rc;
+ }
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ q6afe_tdm_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].tdm);
+ break;
+ default:
+ return -EINVAL;
+ }
+>>>>>>>
+
+ rc = q6afe_port_start(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start AFE port %x\n", dai->id);
+ return rc;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
+}
+
+static int q6slim_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_port_config *pcfg = &dai_data->port_config[dai->id];
+ int i;
+
+ if (!rx_slot) {
+ pr_err("%s: rx slot not found\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++) {
+ pcfg->slim.ch_mapping[i] = rx_slot[i];
+ pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, rx_slot[i]);
+ }
+
+ pcfg->slim.num_channels = rx_num;
+
+ pr_debug("%s: SLIMBUS_%d_RX cnt[%d] ch[%d %d]\n", __func__,
+ (dai->id - SLIMBUS_0_RX) / 2, rx_num,
+ pcfg->slim.ch_mapping[0],
+ pcfg->slim.ch_mapping[1]);
+
+ return 0;
+}
+
+static int q6afe_mi2s_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_port *port = dai_data->port[dai->id];
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ return q6afe_port_set_sysclk(port, clk_id, 0, 5, freq, dir);
+ case LPAIF_BIT_CLK:
+ case LPAIF_OSR_CLK:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_SRC_INTERNAL,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+<<<<<<<
+=======
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+>>>>>>>
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
+ {"HDMI Playback", NULL, "HDMI_RX"},
+ {"Slimbus1 Playback", NULL, "SLIMBUS_1_RX"},
+ {"Slimbus2 Playback", NULL, "SLIMBUS_2_RX"},
+ {"Slimbus3 Playback", NULL, "SLIMBUS_3_RX"},
+ {"Slimbus4 Playback", NULL, "SLIMBUS_4_RX"},
+ {"Slimbus5 Playback", NULL, "SLIMBUS_5_RX"},
+ {"Slimbus6 Playback", NULL, "SLIMBUS_6_RX"},
+
+ {"Primary MI2S Playback", NULL, "PRI_MI2S_RX"},
+ {"Secondary MI2S Playback", NULL, "SEC_MI2S_RX"},
+ {"Tertiary MI2S Playback", NULL, "TERT_MI2S_RX"},
+ {"Quaternary MI2S Playback", NULL, "QUAT_MI2S_RX"},
+
+<<<<<<<
+=======
+ {"Primary TDM0 Playback", NULL, "PRIMARY_TDM_RX_0"},
+ {"Primary TDM1 Playback", NULL, "PRIMARY_TDM_RX_1"},
+ {"Primary TDM2 Playback", NULL, "PRIMARY_TDM_RX_2"},
+ {"Primary TDM3 Playback", NULL, "PRIMARY_TDM_RX_3"},
+ {"Primary TDM4 Playback", NULL, "PRIMARY_TDM_RX_4"},
+ {"Primary TDM5 Playback", NULL, "PRIMARY_TDM_RX_5"},
+ {"Primary TDM6 Playback", NULL, "PRIMARY_TDM_RX_6"},
+ {"Primary TDM7 Playback", NULL, "PRIMARY_TDM_RX_7"},
+
+ {"Secondary TDM0 Playback", NULL, "SEC_TDM_RX_0"},
+ {"Secondary TDM1 Playback", NULL, "SEC_TDM_RX_1"},
+ {"Secondary TDM2 Playback", NULL, "SEC_TDM_RX_2"},
+ {"Secondary TDM3 Playback", NULL, "SEC_TDM_RX_3"},
+ {"Secondary TDM4 Playback", NULL, "SEC_TDM_RX_4"},
+ {"Secondary TDM5 Playback", NULL, "SEC_TDM_RX_5"},
+ {"Secondary TDM6 Playback", NULL, "SEC_TDM_RX_6"},
+ {"Secondary TDM7 Playback", NULL, "SEC_TDM_RX_7"},
+
+ {"Tertiary TDM0 Playback", NULL, "TERT_TDM_RX_0"},
+ {"Tertiary TDM1 Playback", NULL, "TERT_TDM_RX_1"},
+ {"Tertiary TDM2 Playback", NULL, "TERT_TDM_RX_2"},
+ {"Tertiary TDM3 Playback", NULL, "TERT_TDM_RX_3"},
+ {"Tertiary TDM4 Playback", NULL, "TERT_TDM_RX_4"},
+ {"Tertiary TDM5 Playback", NULL, "TERT_TDM_RX_5"},
+ {"Tertiary TDM6 Playback", NULL, "TERT_TDM_RX_6"},
+ {"Tertiary TDM7 Playback", NULL, "TERT_TDM_RX_7"},
+
+ {"Quaternary TDM0 Playback", NULL, "QUAT_TDM_RX_0"},
+ {"Quaternary TDM1 Playback", NULL, "QUAT_TDM_RX_1"},
+ {"Quaternary TDM2 Playback", NULL, "QUAT_TDM_RX_2"},
+ {"Quaternary TDM3 Playback", NULL, "QUAT_TDM_RX_3"},
+ {"Quaternary TDM4 Playback", NULL, "QUAT_TDM_RX_4"},
+ {"Quaternary TDM5 Playback", NULL, "QUAT_TDM_RX_5"},
+ {"Quaternary TDM6 Playback", NULL, "QUAT_TDM_RX_6"},
+ {"Quaternary TDM7 Playback", NULL, "QUAT_TDM_RX_7"},
+
+ {"Quinary TDM0 Playback", NULL, "QUIN_TDM_RX_0"},
+ {"Quinary TDM1 Playback", NULL, "QUIN_TDM_RX_1"},
+ {"Quinary TDM2 Playback", NULL, "QUIN_TDM_RX_2"},
+ {"Quinary TDM3 Playback", NULL, "QUIN_TDM_RX_3"},
+ {"Quinary TDM4 Playback", NULL, "QUIN_TDM_RX_4"},
+ {"Quinary TDM5 Playback", NULL, "QUIN_TDM_RX_5"},
+ {"Quinary TDM6 Playback", NULL, "QUIN_TDM_RX_6"},
+ {"Quinary TDM7 Playback", NULL, "QUIN_TDM_RX_7"},
+
+ {"PRIMARY_TDM_TX_0", NULL, "Primary TDM0 Capture"},
+ {"PRIMARY_TDM_TX_1", NULL, "Primary TDM1 Capture"},
+ {"PRIMARY_TDM_TX_2", NULL, "Primary TDM2 Capture"},
+ {"PRIMARY_TDM_TX_3", NULL, "Primary TDM3 Capture"},
+ {"PRIMARY_TDM_TX_4", NULL, "Primary TDM4 Capture"},
+ {"PRIMARY_TDM_TX_5", NULL, "Primary TDM5 Capture"},
+ {"PRIMARY_TDM_TX_6", NULL, "Primary TDM6 Capture"},
+ {"PRIMARY_TDM_TX_7", NULL, "Primary TDM7 Capture"},
+
+ {"SEC_TDM_TX_0", NULL, "Secondary TDM0 Capture"},
+ {"SEC_TDM_TX_1", NULL, "Secondary TDM1 Capture"},
+ {"SEC_TDM_TX_2", NULL, "Secondary TDM2 Capture"},
+ {"SEC_TDM_TX_3", NULL, "Secondary TDM3 Capture"},
+ {"SEC_TDM_TX_4", NULL, "Secondary TDM4 Capture"},
+ {"SEC_TDM_TX_5", NULL, "Secondary TDM5 Capture"},
+ {"SEC_TDM_TX_6", NULL, "Secondary TDM6 Capture"},
+ {"SEC_TDM_TX_7", NULL, "Secondary TDM7 Capture"},
+
+ {"TERT_TDM_TX_0", NULL, "Tertiary TDM0 Capture"},
+ {"TERT_TDM_TX_1", NULL, "Tertiary TDM1 Capture"},
+ {"TERT_TDM_TX_2", NULL, "Tertiary TDM2 Capture"},
+ {"TERT_TDM_TX_3", NULL, "Tertiary TDM3 Capture"},
+ {"TERT_TDM_TX_4", NULL, "Tertiary TDM4 Capture"},
+ {"TERT_TDM_TX_5", NULL, "Tertiary TDM5 Capture"},
+ {"TERT_TDM_TX_6", NULL, "Tertiary TDM6 Capture"},
+ {"TERT_TDM_TX_7", NULL, "Tertiary TDM7 Capture"},
+
+ {"QUAT_TDM_TX_0", NULL, "Quaternary TDM0 Capture"},
+ {"QUAT_TDM_TX_1", NULL, "Quaternary TDM1 Capture"},
+ {"QUAT_TDM_TX_2", NULL, "Quaternary TDM2 Capture"},
+ {"QUAT_TDM_TX_3", NULL, "Quaternary TDM3 Capture"},
+ {"QUAT_TDM_TX_4", NULL, "Quaternary TDM4 Capture"},
+ {"QUAT_TDM_TX_5", NULL, "Quaternary TDM5 Capture"},
+ {"QUAT_TDM_TX_6", NULL, "Quaternary TDM6 Capture"},
+ {"QUAT_TDM_TX_7", NULL, "Quaternary TDM7 Capture"},
+
+ {"QUIN_TDM_TX_0", NULL, "Quinary TDM0 Capture"},
+ {"QUIN_TDM_TX_1", NULL, "Quinary TDM1 Capture"},
+ {"QUIN_TDM_TX_2", NULL, "Quinary TDM2 Capture"},
+ {"QUIN_TDM_TX_3", NULL, "Quinary TDM3 Capture"},
+ {"QUIN_TDM_TX_4", NULL, "Quinary TDM4 Capture"},
+ {"QUIN_TDM_TX_5", NULL, "Quinary TDM5 Capture"},
+ {"QUIN_TDM_TX_6", NULL, "Quinary TDM6 Capture"},
+ {"QUIN_TDM_TX_7", NULL, "Quinary TDM7 Capture"},
+
+>>>>>>>
+ {"TERT_MI2S_TX", NULL, "Tertiary MI2S Capture"},
+ {"PRI_MI2S_TX", NULL, "Primary MI2S Capture"},
+ {"SEC_MI2S_TX", NULL, "Secondary MI2S Capture"},
+ {"QUAT_MI2S_TX", NULL, "Quaternary MI2S Capture"},
+};
+
+static struct snd_soc_dai_ops q6hdmi_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6hdmi_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+<<<<<<<
+ .startup = q6afe_dai_startup,
+};
+
+static struct snd_soc_dai_ops q6i2s_ops = {
+ .prepare = q6afe_mi2s_prepare,
+ .hw_params = q6i2s_hw_params,
+ .set_fmt = q6i2s_set_fmt,
+ .shutdown = q6afe_dai_shutdown,
+ .startup = q6afe_dai_startup,
+=======
+};
+
+static struct snd_soc_dai_ops q6i2s_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6i2s_hw_params,
+ .set_fmt = q6i2s_set_fmt,
+ .shutdown = q6afe_dai_shutdown,
+>>>>>>>
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+};
+
+static struct snd_soc_dai_ops q6slim_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6slim_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+<<<<<<<
+ .set_channel_map = q6slim_set_channel_map,
+};
+
+static struct snd_soc_dai_ops q6tdm_ops = {
+ .prepare = q6afe_dai_prepare,
+ .shutdown = q6afe_dai_shutdown,
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+ .set_tdm_slot = q6tdm_set_tdm_slot,
+ .set_channel_map = q6tdm_set_channel_map,
+ .hw_params = q6tdm_hw_params,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_port *port;
+
+=======
+ .startup = q6afe_dai_startup,
+ .set_channel_map = q6slim_set_channel_map,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ struct snd_soc_dapm_context *dapm;
+ struct q6afe_port *port;
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+
+>>>>>>>
+ port = q6afe_port_get_from_id(dai->dev, dai->id);
+ if (IS_ERR(port)) {
+ dev_err(dai->dev, "Unable to get afe port\n");
+ return -EINVAL;
+ }
+ dai_data->port[dai->id] = port;
+
+ return 0;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ q6afe_port_put(dai_data->port[dai->id]);
+ dai_data->port[dai->id] = NULL;
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+
+ q6afe_port_put(dai_data->port[dai->id]);
+>>>>>>>
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver q6afe_dais[] = {
+ {
+ .playback = {
+ .stream_name = "HDMI Playback",
+ .rates = SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000 |
+<<<<<<<
+ SNDRV_PCM_RATE_192000,
+=======
+ SNDRV_PCM_RATE_192000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_max = 192000,
+ .rate_min = 48000,
+ },
+ .ops = &q6hdmi_ops,
+<<<<<<<
+ .id = AFE_PORT_HDMI_RX,
+=======
+ .id = HDMI_RX,
+>>>>>>>
+ .name = "HDMI",
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .name = "SLIMBUS_0_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_0_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .playback = {
+ .stream_name = "Slimbus Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ }, {
+ .playback = {
+ .stream_name = "Slimbus1 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_1_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_1_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus2 Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_2_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_2_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus3 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_3_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_3_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus4 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_4_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_4_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus5 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_5_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_5_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus6 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_44100,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &q6slim_ops,
+ .name = "SLIMBUS_6_RX",
+ .id = SLIMBUS_6_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Primary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_RX,
+ .name = "PRI_MI2S_RX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Primary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_TX,
+ .name = "PRI_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Secondary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "SEC_MI2S_RX",
+ .id = SECONDARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Secondary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = SECONDARY_MI2S_TX,
+ .name = "SEC_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Tertiary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "TERT_MI2S_RX",
+ .id = TERTIARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Tertiary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = TERTIARY_MI2S_TX,
+ .name = "TERT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Quaternary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "QUAT_MI2S_RX",
+ .id = QUATERNARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Quaternary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = QUATERNARY_MI2S_TX,
+ .name = "QUAT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ },
+<<<<<<<
+=======
+ Q6AFE_TDM_PB_DAI("Primary", 0, PRIMARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Primary", 1, PRIMARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Primary", 2, PRIMARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Primary", 3, PRIMARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Primary", 4, PRIMARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Primary", 5, PRIMARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Primary", 6, PRIMARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Primary", 7, PRIMARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Primary", 0, PRIMARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Primary", 1, PRIMARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Primary", 2, PRIMARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Primary", 3, PRIMARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Primary", 4, PRIMARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Primary", 5, PRIMARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Primary", 6, PRIMARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Primary", 7, PRIMARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Secondary", 0, SECONDARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Secondary", 1, SECONDARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Secondary", 2, SECONDARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Secondary", 3, SECONDARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Secondary", 4, SECONDARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Secondary", 5, SECONDARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Secondary", 6, SECONDARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Secondary", 7, SECONDARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Secondary", 0, SECONDARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Secondary", 1, SECONDARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Secondary", 2, SECONDARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Secondary", 3, SECONDARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Secondary", 4, SECONDARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Secondary", 5, SECONDARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Secondary", 6, SECONDARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Secondary", 7, SECONDARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Tertiary", 0, TERTIARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Tertiary", 1, TERTIARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Tertiary", 2, TERTIARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Tertiary", 3, TERTIARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Tertiary", 4, TERTIARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Tertiary", 5, TERTIARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Tertiary", 6, TERTIARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Tertiary", 7, TERTIARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 0, TERTIARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 1, TERTIARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 2, TERTIARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 3, TERTIARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 4, TERTIARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 5, TERTIARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 6, TERTIARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 7, TERTIARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quaternary", 0, QUATERNARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quaternary", 1, QUATERNARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quaternary", 2, QUATERNARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quaternary", 3, QUATERNARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quaternary", 4, QUATERNARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quaternary", 5, QUATERNARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quaternary", 6, QUATERNARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quaternary", 7, QUATERNARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 0, QUATERNARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 1, QUATERNARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 2, QUATERNARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 3, QUATERNARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 4, QUATERNARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 5, QUATERNARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 6, QUATERNARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 7, QUATERNARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quinary", 0, QUINARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quinary", 1, QUINARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quinary", 2, QUINARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quinary", 3, QUINARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quinary", 4, QUINARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quinary", 5, QUINARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quinary", 6, QUINARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quinary", 7, QUINARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quinary", 0, QUINARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quinary", 1, QUINARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quinary", 2, QUINARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quinary", 3, QUINARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quinary", 4, QUINARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quinary", 5, QUINARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quinary", 6, QUINARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quinary", 7, QUINARY_TDM_TX_7),
+>>>>>>>
+};
+
+static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ int id = args->args[0];
+<<<<<<<
+ int i, ret = -EINVAL;
+=======
+ int ret = -EINVAL;
+ int i;
+>>>>>>>
+
+ for (i = 0; i < ARRAY_SIZE(q6afe_dais); i++) {
+ if (q6afe_dais[i].id == id) {
+ *dai_name = q6afe_dais[i].name;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
+ SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+ "Secondary MI2S Playback SD1",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+ 0, 0, 0, 0),
+<<<<<<<
+=======
+
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+ 0, 0, 0, 0),
+>>>>>>>
+};
+
+static const struct snd_soc_component_driver q6afe_dai_component = {
+ .name = "q6afe-dai-component",
+ .dapm_widgets = q6afe_dai_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(q6afe_dai_widgets),
+<<<<<<<
+=======
+ .controls = q6afe_config_controls,
+ .num_controls = ARRAY_SIZE(q6afe_config_controls),
+>>>>>>>
+ .dapm_routes = q6afe_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(q6afe_dapm_routes),
+ .of_xlate_dai_name = q6afe_of_xlate_dai_name,
+
+};
+
+<<<<<<<
+int q6afe_dai_dev_probe(struct device *dev)
+{
+	struct q6afe_dai_data *dai_data;
+
+	dai_data = devm_kzalloc(dev, sizeof(*dai_data), GFP_KERNEL);
+	if (!dai_data)
+		return -ENOMEM;
+
+ q6afe_set_dai_data(dev, dai_data);
+
+ return devm_snd_soc_register_component(dev, &q6afe_dai_component,
+ q6afe_dais, ARRAY_SIZE(q6afe_dais));
+}
+EXPORT_SYMBOL_GPL(q6afe_dai_dev_probe);
+
+int q6afe_dai_dev_remove(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6afe_dai_dev_remove);
+=======
+static void of_q6afe_parse_dai_data(struct device *dev,
+ struct q6afe_dai_data *data)
+{
+ struct device_node *node;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ unsigned int lines[Q6AFE_MAX_MI2S_LINES];
+ struct q6afe_dai_priv_data *priv;
+ int id, i, num_lines;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id > AFE_PORT_MAX) {
+ dev_err(dev, "valid dai id not found:%d\n", ret);
+ continue;
+ }
+
+ switch (id) {
+ /* MI2S specific properties */
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ priv = &data->priv[id];
+ ret = of_property_read_variable_u32_array(node,
+ "qcom,sd-lines",
+ lines, 0,
+ Q6AFE_MAX_MI2S_LINES);
+ if (ret < 0)
+ num_lines = 0;
+ else
+ num_lines = ret;
+
+ priv->sd_line_mask = 0;
+
+ for (i = 0; i < num_lines; i++)
+ priv->sd_line_mask |= BIT(lines[i]);
+
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ priv = &data->priv[id];
+ ret = of_property_read_u32(node, "qcom,tdm-sync-mode",
+ &priv->sync_mode);
+ if (ret) {
+ dev_err(dev, "No Sync mode from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-sync-src",
+ &priv->sync_src);
+ if (ret) {
+ dev_err(dev, "No Sync Src from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-out",
+ &priv->data_out_enable);
+ if (ret) {
+ dev_err(dev, "No Data out enable from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-invert-sync",
+ &priv->invert_sync);
+ if (ret) {
+ dev_err(dev, "No Invert sync from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-delay",
+ &priv->data_delay);
+ if (ret) {
+ dev_err(dev, "No Data Delay from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-align",
+ &priv->data_align);
+ if (ret) {
+ dev_err(dev, "No Data align from DT\n");
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int q6afe_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct q6afe_dai_data *dai_data;
+
+ dai_data = kzalloc(sizeof(*dai_data), GFP_KERNEL);
+ if (!dai_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dai_data);
+
+ of_q6afe_parse_dai_data(dev, dai_data);
+
+ return snd_soc_register_component(dev, &q6afe_dai_component,
+ q6afe_dais, ARRAY_SIZE(q6afe_dais));
+}
+
+static void q6afe_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+ kfree(dai_data);
+}
+
+static const struct component_ops q6afe_dai_comp_ops = {
+ .bind = q6afe_dai_bind,
+ .unbind = q6afe_dai_unbind,
+};
+
+static int q6afe_dai_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6afe_dai_comp_ops);
+}
+
+static int q6afe_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6afe_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6afe_dai_platform_driver = {
+ .driver = {
+ .name = "q6afe-dai",
+ },
+ .probe = q6afe_dai_dev_probe,
+ .remove = q6afe_dai_dev_remove,
+};
+module_platform_driver(q6afe_dai_platform_driver);
+
+>>>>>>>
+MODULE_DESCRIPTION("Q6 Audio Fronend dai driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/thisimage b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/thisimage
new file mode 100644
index 0000000..b966e8e
--- /dev/null
+++ b/rr-cache/df72d29db70d6c63c309680f5104de95fee0c2ce/thisimage
@@ -0,0 +1,1638 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/component.h>
+>>>>>>>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include "q6afe.h"
+
+<<<<<<<
+=======
+#define Q6AFE_TDM_PB_DAI(pre, num, did) { \
+ .playback = { \
+ .stream_name = pre" TDM"#num" Playback", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
+
+#define Q6AFE_TDM_CAP_DAI(pre, num, did) { \
+ .capture = { \
+ .stream_name = pre" TDM"#num" Capture", \
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_176400, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 8000, \
+ .rate_max = 176400, \
+ }, \
+ .name = #did, \
+ .ops = &q6tdm_ops, \
+ .id = did, \
+ .probe = msm_dai_q6_dai_probe, \
+ .remove = msm_dai_q6_dai_remove, \
+ }
+
+struct q6afe_dai_priv_data {
+ uint32_t sd_line_mask;
+ uint32_t sync_mode;
+ uint32_t sync_src;
+ uint32_t data_out_enable;
+ uint32_t invert_sync;
+ uint32_t data_delay;
+ uint32_t data_align;
+};
+
+>>>>>>>
+struct q6afe_dai_data {
+ struct q6afe_port *port[AFE_PORT_MAX];
+ struct q6afe_port_config port_config[AFE_PORT_MAX];
+ bool is_port_started[AFE_PORT_MAX];
+<<<<<<<
+ struct q6afe_dai_priv_data priv[AFE_PORT_MAX];
+=======
+};
+
+static int q6hdmi_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct q6afe_dai_data *dai_data = kcontrol->private_data;
+ int value = ucontrol->value.integer.value[0];
+
+ dai_data->port_config[AFE_PORT_HDMI_RX].hdmi.datatype = value;
+
+ return 0;
+}
+
+static int q6hdmi_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ struct q6afe_dai_data *dai_data = kcontrol->private_data;
+
+ ucontrol->value.integer.value[0] =
+ dai_data->port_config[AFE_PORT_HDMI_RX].hdmi.datatype;
+
+ return 0;
+}
+
+static const char * const hdmi_format[] = {
+ "LPCM",
+ "Compr"
+};
+
+static const struct soc_enum hdmi_config_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, hdmi_format),
+};
+
+static const struct snd_kcontrol_new q6afe_config_controls[] = {
+ SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
+ q6hdmi_format_get,
+ q6hdmi_format_put),
+>>>>>>>
+};
+
+static int q6slim_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_slim_cfg *slim = &dai_data->port_config[dai->id].slim;
+
+ slim->num_channels = params_channels(params);
+ slim->sample_rate = params_rate(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ case SNDRV_PCM_FORMAT_SPECIAL:
+ slim->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ slim->bit_width = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ slim->bit_width = 32;
+ break;
+ default:
+ pr_err("%s: format %d\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6hdmi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ int channels = params_channels(params);
+ struct q6afe_hdmi_cfg *hdmi = &dai_data->port_config[dai->id].hdmi;
+
+ hdmi->sample_rate = params_rate(params);
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ hdmi->bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ hdmi->bit_width = 24;
+ break;
+ }
+
+<<<<<<<
+ /* HDMI spec CEA-861-E: Table 28 Audio InfoFrame Data Byte 4 */
+=======
+ /*refer to HDMI spec CEA-861-E: Table 28 Audio InfoFrame Data Byte 4*/
+>>>>>>>
+ switch (channels) {
+ case 2:
+ hdmi->channel_allocation = 0;
+ break;
+ case 3:
+ hdmi->channel_allocation = 0x02;
+ break;
+ case 4:
+ hdmi->channel_allocation = 0x06;
+ break;
+ case 5:
+ hdmi->channel_allocation = 0x0A;
+ break;
+ case 6:
+ hdmi->channel_allocation = 0x0B;
+ break;
+ case 7:
+ hdmi->channel_allocation = 0x12;
+ break;
+ case 8:
+ hdmi->channel_allocation = 0x13;
+ break;
+ default:
+ dev_err(dai->dev, "invalid Channels = %u\n", channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int q6i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->sample_rate = params_rate(params);
+ i2s->bit_width = params_width(params);
+ i2s->num_channels = params_channels(params);
+ i2s->sd_line_mask = dai_data->priv[dai->id].sd_line_mask;
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+
+ i2s->sample_rate = params_rate(params);
+ i2s->bit_width = params_width(params);
+ i2s->num_channels = params_channels(params);
+>>>>>>>
+
+ return 0;
+}
+
+static int q6i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_i2s_cfg *i2s = &dai_data->port_config[dai->id].i2s_cfg;
+
+ i2s->fmt = fmt;
+
+ return 0;
+}
+
+<<<<<<<
+static int q6afe_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+
+ dai_data->is_port_started[dai->id] = false;
+
+ return 0;
+}
+
+static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ int rc;
+
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close AFE port\n");
+
+ dai_data->is_port_started[dai->id] = false;
+
+}
+
+static int q6afe_mi2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ int rc;
+
+ if (dai_data->is_port_started[dai->id]) {
+ /* stop the port and restart with new port config */
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to close AFE port\n");
+ return rc;
+ }
+ }
+
+ q6afe_i2s_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].i2s_cfg);
+
+ rc = q6afe_port_start(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start AFE port %x\n", dai->id);
+ return rc;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
+=======
+static int q6tdm_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ unsigned int cap_mask;
+ int rc = 0;
+
+ /* HW only supports 16 and 32 bit slot width configuration */
+ if ((slot_width != 16) && (slot_width != 32)) {
+ dev_err(dai->dev, "%s: invalid slot_width %d\n",
+ __func__, slot_width);
+ return -EINVAL;
+ }
+
+ /* HW supports 1-32 slots configuration. Typical: 1, 2, 4, 8, 16, 32 */
+ switch (slots) {
+ case 2:
+ cap_mask = 0x03;
+ break;
+ case 4:
+ cap_mask = 0x0F;
+ break;
+ case 8:
+ cap_mask = 0xFF;
+ break;
+ case 16:
+ cap_mask = 0xFFFF;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid slots %d\n",
+ __func__, slots);
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ tdm->nslots_per_frame = slots;
+ tdm->slot_width = slot_width;
+ /* TDM RX dais ids are even and tx are odd */
+ tdm->slot_mask = (dai->id & 0x1 ? tx_mask : rx_mask) & cap_mask;
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int q6tdm_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+ int rc = 0;
+ int i = 0;
+
+ switch (dai->id) {
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ if (dai->id & 0x1) {
+ if (!tx_slot) {
+ dev_err(dai->dev, "tx slot not found\n");
+ return -EINVAL;
+ }
+ if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid tx num %d\n",
+ tx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tx_num; i++)
+ tdm->ch_mapping[i] = tx_slot[i];
+
+ for (i = tx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = tx_num;
+ } else {
+ /* rx */
+ if (!rx_slot) {
+ dev_err(dai->dev, "rx slot not found\n");
+ return -EINVAL;
+ }
+ if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+ dev_err(dai->dev, "invalid rx num %d\n",
+ rx_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++)
+ tdm->ch_mapping[i] = rx_slot[i];
+
+ for (i = rx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+ tdm->ch_mapping[i] = Q6AFE_CMAP_INVALID;
+
+ tdm->num_channels = rx_num;
+ }
+
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int q6tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_tdm_cfg *tdm = &dai_data->port_config[dai->id].tdm;
+
+ tdm->bit_width = params_width(params);
+ tdm->sample_rate = params_rate(params);
+ tdm->num_channels = params_channels(params);
+ tdm->data_align_type = dai_data->priv[dai->id].data_align;
+ tdm->sync_src = dai_data->priv[dai->id].sync_src;
+ tdm->sync_mode = dai_data->priv[dai->id].sync_mode;
+
+ return 0;
+}
+static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+
+ dai_data->is_port_started[dai->id] = false;
+
+>>>>>>>
+}
+
+static int q6afe_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ int rc;
+
+ if (dai_data->is_port_started[dai->id]) {
+ /* stop the port and restart with new port config */
+ rc = q6afe_port_stop(dai_data->port[dai->id]);
+ if (rc < 0) {
+<<<<<<<
+ dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
+=======
+ dev_err(dai->dev, "fail to close AFE port\n");
+>>>>>>>
+ return rc;
+ }
+ }
+
+<<<<<<<
+ if (dai->id == AFE_PORT_HDMI_RX)
+ q6afe_hdmi_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].hdmi);
+ else if (dai->id >= SLIMBUS_0_RX && dai->id <= SLIMBUS_6_TX)
+ q6afe_slim_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].slim);
+=======
+ switch (dai->id) {
+ case HDMI_RX:
+ q6afe_hdmi_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].hdmi);
+ break;
+ case SLIMBUS_0_RX ... SLIMBUS_6_TX:
+ q6afe_slim_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].slim);
+ break;
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ rc = q6afe_i2s_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].i2s_cfg);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to prepare AFE port %x\n",
+ dai->id);
+ return rc;
+ }
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ q6afe_tdm_port_prepare(dai_data->port[dai->id],
+ &dai_data->port_config[dai->id].tdm);
+ break;
+ default:
+ return -EINVAL;
+ }
+>>>>>>>
+
+ rc = q6afe_port_start(dai_data->port[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start AFE port %x\n", dai->id);
+ return rc;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
+}
+
+static int q6slim_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_port_config *pcfg = &dai_data->port_config[dai->id];
+ int i;
+
+ if (!rx_slot) {
+ pr_err("%s: rx slot not found\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rx_num; i++) {
+ pcfg->slim.ch_mapping[i] = rx_slot[i];
+ pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, rx_slot[i]);
+ }
+
+ pcfg->slim.num_channels = rx_num;
+
+ pr_debug("%s: SLIMBUS_%d_RX cnt[%d] ch[%d %d]\n", __func__,
+ (dai->id - SLIMBUS_0_RX) / 2, rx_num,
+ pcfg->slim.ch_mapping[0],
+ pcfg->slim.ch_mapping[1]);
+
+ return 0;
+}
+
+static int q6afe_mi2s_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+>>>>>>>
+ struct q6afe_port *port = dai_data->port[dai->id];
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ return q6afe_port_set_sysclk(port, clk_id, 0, 5, freq, dir);
+ case LPAIF_BIT_CLK:
+ case LPAIF_OSR_CLK:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_SRC_INTERNAL,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+<<<<<<<
+=======
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
+ return q6afe_port_set_sysclk(port, clk_id,
+ Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO,
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ freq, dir);
+>>>>>>>
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
+ {"HDMI Playback", NULL, "HDMI_RX"},
+ {"Slimbus1 Playback", NULL, "SLIMBUS_1_RX"},
+ {"Slimbus2 Playback", NULL, "SLIMBUS_2_RX"},
+ {"Slimbus3 Playback", NULL, "SLIMBUS_3_RX"},
+ {"Slimbus4 Playback", NULL, "SLIMBUS_4_RX"},
+ {"Slimbus5 Playback", NULL, "SLIMBUS_5_RX"},
+ {"Slimbus6 Playback", NULL, "SLIMBUS_6_RX"},
+
+ {"Primary MI2S Playback", NULL, "PRI_MI2S_RX"},
+ {"Secondary MI2S Playback", NULL, "SEC_MI2S_RX"},
+ {"Tertiary MI2S Playback", NULL, "TERT_MI2S_RX"},
+ {"Quaternary MI2S Playback", NULL, "QUAT_MI2S_RX"},
+
+<<<<<<<
+=======
+ {"Primary TDM0 Playback", NULL, "PRIMARY_TDM_RX_0"},
+ {"Primary TDM1 Playback", NULL, "PRIMARY_TDM_RX_1"},
+ {"Primary TDM2 Playback", NULL, "PRIMARY_TDM_RX_2"},
+ {"Primary TDM3 Playback", NULL, "PRIMARY_TDM_RX_3"},
+ {"Primary TDM4 Playback", NULL, "PRIMARY_TDM_RX_4"},
+ {"Primary TDM5 Playback", NULL, "PRIMARY_TDM_RX_5"},
+ {"Primary TDM6 Playback", NULL, "PRIMARY_TDM_RX_6"},
+ {"Primary TDM7 Playback", NULL, "PRIMARY_TDM_RX_7"},
+
+ {"Secondary TDM0 Playback", NULL, "SEC_TDM_RX_0"},
+ {"Secondary TDM1 Playback", NULL, "SEC_TDM_RX_1"},
+ {"Secondary TDM2 Playback", NULL, "SEC_TDM_RX_2"},
+ {"Secondary TDM3 Playback", NULL, "SEC_TDM_RX_3"},
+ {"Secondary TDM4 Playback", NULL, "SEC_TDM_RX_4"},
+ {"Secondary TDM5 Playback", NULL, "SEC_TDM_RX_5"},
+ {"Secondary TDM6 Playback", NULL, "SEC_TDM_RX_6"},
+ {"Secondary TDM7 Playback", NULL, "SEC_TDM_RX_7"},
+
+ {"Tertiary TDM0 Playback", NULL, "TERT_TDM_RX_0"},
+ {"Tertiary TDM1 Playback", NULL, "TERT_TDM_RX_1"},
+ {"Tertiary TDM2 Playback", NULL, "TERT_TDM_RX_2"},
+ {"Tertiary TDM3 Playback", NULL, "TERT_TDM_RX_3"},
+ {"Tertiary TDM4 Playback", NULL, "TERT_TDM_RX_4"},
+ {"Tertiary TDM5 Playback", NULL, "TERT_TDM_RX_5"},
+ {"Tertiary TDM6 Playback", NULL, "TERT_TDM_RX_6"},
+ {"Tertiary TDM7 Playback", NULL, "TERT_TDM_RX_7"},
+
+ {"Quaternary TDM0 Playback", NULL, "QUAT_TDM_RX_0"},
+ {"Quaternary TDM1 Playback", NULL, "QUAT_TDM_RX_1"},
+ {"Quaternary TDM2 Playback", NULL, "QUAT_TDM_RX_2"},
+ {"Quaternary TDM3 Playback", NULL, "QUAT_TDM_RX_3"},
+ {"Quaternary TDM4 Playback", NULL, "QUAT_TDM_RX_4"},
+ {"Quaternary TDM5 Playback", NULL, "QUAT_TDM_RX_5"},
+ {"Quaternary TDM6 Playback", NULL, "QUAT_TDM_RX_6"},
+ {"Quaternary TDM7 Playback", NULL, "QUAT_TDM_RX_7"},
+
+ {"Quinary TDM0 Playback", NULL, "QUIN_TDM_RX_0"},
+ {"Quinary TDM1 Playback", NULL, "QUIN_TDM_RX_1"},
+ {"Quinary TDM2 Playback", NULL, "QUIN_TDM_RX_2"},
+ {"Quinary TDM3 Playback", NULL, "QUIN_TDM_RX_3"},
+ {"Quinary TDM4 Playback", NULL, "QUIN_TDM_RX_4"},
+ {"Quinary TDM5 Playback", NULL, "QUIN_TDM_RX_5"},
+ {"Quinary TDM6 Playback", NULL, "QUIN_TDM_RX_6"},
+ {"Quinary TDM7 Playback", NULL, "QUIN_TDM_RX_7"},
+
+ {"PRIMARY_TDM_TX_0", NULL, "Primary TDM0 Capture"},
+ {"PRIMARY_TDM_TX_1", NULL, "Primary TDM1 Capture"},
+ {"PRIMARY_TDM_TX_2", NULL, "Primary TDM2 Capture"},
+ {"PRIMARY_TDM_TX_3", NULL, "Primary TDM3 Capture"},
+ {"PRIMARY_TDM_TX_4", NULL, "Primary TDM4 Capture"},
+ {"PRIMARY_TDM_TX_5", NULL, "Primary TDM5 Capture"},
+ {"PRIMARY_TDM_TX_6", NULL, "Primary TDM6 Capture"},
+ {"PRIMARY_TDM_TX_7", NULL, "Primary TDM7 Capture"},
+
+ {"SEC_TDM_TX_0", NULL, "Secondary TDM0 Capture"},
+ {"SEC_TDM_TX_1", NULL, "Secondary TDM1 Capture"},
+ {"SEC_TDM_TX_2", NULL, "Secondary TDM2 Capture"},
+ {"SEC_TDM_TX_3", NULL, "Secondary TDM3 Capture"},
+ {"SEC_TDM_TX_4", NULL, "Secondary TDM4 Capture"},
+ {"SEC_TDM_TX_5", NULL, "Secondary TDM5 Capture"},
+ {"SEC_TDM_TX_6", NULL, "Secondary TDM6 Capture"},
+ {"SEC_TDM_TX_7", NULL, "Secondary TDM7 Capture"},
+
+ {"TERT_TDM_TX_0", NULL, "Tertiary TDM0 Capture"},
+ {"TERT_TDM_TX_1", NULL, "Tertiary TDM1 Capture"},
+ {"TERT_TDM_TX_2", NULL, "Tertiary TDM2 Capture"},
+ {"TERT_TDM_TX_3", NULL, "Tertiary TDM3 Capture"},
+ {"TERT_TDM_TX_4", NULL, "Tertiary TDM4 Capture"},
+ {"TERT_TDM_TX_5", NULL, "Tertiary TDM5 Capture"},
+ {"TERT_TDM_TX_6", NULL, "Tertiary TDM6 Capture"},
+ {"TERT_TDM_TX_7", NULL, "Tertiary TDM7 Capture"},
+
+ {"QUAT_TDM_TX_0", NULL, "Quaternary TDM0 Capture"},
+ {"QUAT_TDM_TX_1", NULL, "Quaternary TDM1 Capture"},
+ {"QUAT_TDM_TX_2", NULL, "Quaternary TDM2 Capture"},
+ {"QUAT_TDM_TX_3", NULL, "Quaternary TDM3 Capture"},
+ {"QUAT_TDM_TX_4", NULL, "Quaternary TDM4 Capture"},
+ {"QUAT_TDM_TX_5", NULL, "Quaternary TDM5 Capture"},
+ {"QUAT_TDM_TX_6", NULL, "Quaternary TDM6 Capture"},
+ {"QUAT_TDM_TX_7", NULL, "Quaternary TDM7 Capture"},
+
+ {"QUIN_TDM_TX_0", NULL, "Quinary TDM0 Capture"},
+ {"QUIN_TDM_TX_1", NULL, "Quinary TDM1 Capture"},
+ {"QUIN_TDM_TX_2", NULL, "Quinary TDM2 Capture"},
+ {"QUIN_TDM_TX_3", NULL, "Quinary TDM3 Capture"},
+ {"QUIN_TDM_TX_4", NULL, "Quinary TDM4 Capture"},
+ {"QUIN_TDM_TX_5", NULL, "Quinary TDM5 Capture"},
+ {"QUIN_TDM_TX_6", NULL, "Quinary TDM6 Capture"},
+ {"QUIN_TDM_TX_7", NULL, "Quinary TDM7 Capture"},
+
+>>>>>>>
+ {"TERT_MI2S_TX", NULL, "Tertiary MI2S Capture"},
+ {"PRI_MI2S_TX", NULL, "Primary MI2S Capture"},
+ {"SEC_MI2S_TX", NULL, "Secondary MI2S Capture"},
+ {"QUAT_MI2S_TX", NULL, "Quaternary MI2S Capture"},
+};
+
+static struct snd_soc_dai_ops q6hdmi_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6hdmi_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+<<<<<<<
+ .startup = q6afe_dai_startup,
+};
+
+static struct snd_soc_dai_ops q6i2s_ops = {
+ .prepare = q6afe_mi2s_prepare,
+ .hw_params = q6i2s_hw_params,
+ .set_fmt = q6i2s_set_fmt,
+ .shutdown = q6afe_dai_shutdown,
+ .startup = q6afe_dai_startup,
+=======
+};
+
+static struct snd_soc_dai_ops q6i2s_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6i2s_hw_params,
+ .set_fmt = q6i2s_set_fmt,
+ .shutdown = q6afe_dai_shutdown,
+>>>>>>>
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+};
+
+static struct snd_soc_dai_ops q6slim_ops = {
+ .prepare = q6afe_dai_prepare,
+ .hw_params = q6slim_hw_params,
+ .shutdown = q6afe_dai_shutdown,
+<<<<<<<
+ .set_channel_map = q6slim_set_channel_map,
+};
+
+static struct snd_soc_dai_ops q6tdm_ops = {
+ .prepare = q6afe_dai_prepare,
+ .shutdown = q6afe_dai_shutdown,
+ .set_sysclk = q6afe_mi2s_set_sysclk,
+ .set_tdm_slot = q6tdm_set_tdm_slot,
+ .set_channel_map = q6tdm_set_channel_map,
+ .hw_params = q6tdm_hw_params,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct q6afe_port *port;
+
+=======
+ .startup = q6afe_dai_startup,
+ .set_channel_map = q6slim_set_channel_map,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+ struct snd_soc_dapm_context *dapm;
+ struct q6afe_port *port;
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+
+>>>>>>>
+ port = q6afe_port_get_from_id(dai->dev, dai->id);
+ if (IS_ERR(port)) {
+ dev_err(dai->dev, "Unable to get afe port\n");
+ return -EINVAL;
+ }
+ dai_data->port[dai->id] = port;
+
+ return 0;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+<<<<<<<
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ q6afe_port_put(dai_data->port[dai->id]);
+ dai_data->port[dai->id] = NULL;
+=======
+ struct q6afe_dai_data *dai_data = q6afe_get_dai_data(dai->dev);
+
+ q6afe_port_put(dai_data->port[dai->id]);
+>>>>>>>
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver q6afe_dais[] = {
+ {
+ .playback = {
+ .stream_name = "HDMI Playback",
+ .rates = SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000 |
+<<<<<<<
+ SNDRV_PCM_RATE_192000,
+=======
+ SNDRV_PCM_RATE_192000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_max = 192000,
+ .rate_min = 48000,
+ },
+ .ops = &q6hdmi_ops,
+<<<<<<<
+ .id = AFE_PORT_HDMI_RX,
+=======
+ .id = HDMI_RX,
+>>>>>>>
+ .name = "HDMI",
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .name = "SLIMBUS_0_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_0_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .playback = {
+ .stream_name = "Slimbus Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ }, {
+ .playback = {
+ .stream_name = "Slimbus1 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_1_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_1_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus2 Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_2_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_2_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus3 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_3_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_3_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus4 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_4_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_4_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus5 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .name = "SLIMBUS_5_RX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_5_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Slimbus6 Playback",
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+<<<<<<<
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_44100,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &q6slim_ops,
+ .name = "SLIMBUS_6_RX",
+ .id = SLIMBUS_6_RX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Primary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_RX,
+ .name = "PRI_MI2S_RX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Primary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = PRIMARY_MI2S_TX,
+ .name = "PRI_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Secondary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "SEC_MI2S_RX",
+ .id = SECONDARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Secondary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = SECONDARY_MI2S_TX,
+ .name = "SEC_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Tertiary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "TERT_MI2S_RX",
+ .id = TERTIARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Tertiary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = TERTIARY_MI2S_TX,
+ .name = "TERT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Quaternary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+=======
+ SNDRV_PCM_RATE_16000,
+>>>>>>>
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .name = "QUAT_MI2S_RX",
+ .id = QUATERNARY_MI2S_RX,
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Quaternary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+<<<<<<<
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+=======
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+>>>>>>>
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = QUATERNARY_MI2S_TX,
+ .name = "QUAT_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ },
+<<<<<<<
+=======
+ Q6AFE_TDM_PB_DAI("Primary", 0, PRIMARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Primary", 1, PRIMARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Primary", 2, PRIMARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Primary", 3, PRIMARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Primary", 4, PRIMARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Primary", 5, PRIMARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Primary", 6, PRIMARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Primary", 7, PRIMARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Primary", 0, PRIMARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Primary", 1, PRIMARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Primary", 2, PRIMARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Primary", 3, PRIMARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Primary", 4, PRIMARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Primary", 5, PRIMARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Primary", 6, PRIMARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Primary", 7, PRIMARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Secondary", 0, SECONDARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Secondary", 1, SECONDARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Secondary", 2, SECONDARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Secondary", 3, SECONDARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Secondary", 4, SECONDARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Secondary", 5, SECONDARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Secondary", 6, SECONDARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Secondary", 7, SECONDARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Secondary", 0, SECONDARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Secondary", 1, SECONDARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Secondary", 2, SECONDARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Secondary", 3, SECONDARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Secondary", 4, SECONDARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Secondary", 5, SECONDARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Secondary", 6, SECONDARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Secondary", 7, SECONDARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Tertiary", 0, TERTIARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Tertiary", 1, TERTIARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Tertiary", 2, TERTIARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Tertiary", 3, TERTIARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Tertiary", 4, TERTIARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Tertiary", 5, TERTIARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Tertiary", 6, TERTIARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Tertiary", 7, TERTIARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 0, TERTIARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 1, TERTIARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 2, TERTIARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 3, TERTIARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 4, TERTIARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 5, TERTIARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 6, TERTIARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Tertiary", 7, TERTIARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quaternary", 0, QUATERNARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quaternary", 1, QUATERNARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quaternary", 2, QUATERNARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quaternary", 3, QUATERNARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quaternary", 4, QUATERNARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quaternary", 5, QUATERNARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quaternary", 6, QUATERNARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quaternary", 7, QUATERNARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 0, QUATERNARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 1, QUATERNARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 2, QUATERNARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 3, QUATERNARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 4, QUATERNARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 5, QUATERNARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 6, QUATERNARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quaternary", 7, QUATERNARY_TDM_TX_7),
+ Q6AFE_TDM_PB_DAI("Quinary", 0, QUINARY_TDM_RX_0),
+ Q6AFE_TDM_PB_DAI("Quinary", 1, QUINARY_TDM_RX_1),
+ Q6AFE_TDM_PB_DAI("Quinary", 2, QUINARY_TDM_RX_2),
+ Q6AFE_TDM_PB_DAI("Quinary", 3, QUINARY_TDM_RX_3),
+ Q6AFE_TDM_PB_DAI("Quinary", 4, QUINARY_TDM_RX_4),
+ Q6AFE_TDM_PB_DAI("Quinary", 5, QUINARY_TDM_RX_5),
+ Q6AFE_TDM_PB_DAI("Quinary", 6, QUINARY_TDM_RX_6),
+ Q6AFE_TDM_PB_DAI("Quinary", 7, QUINARY_TDM_RX_7),
+ Q6AFE_TDM_CAP_DAI("Quinary", 0, QUINARY_TDM_TX_0),
+ Q6AFE_TDM_CAP_DAI("Quinary", 1, QUINARY_TDM_TX_1),
+ Q6AFE_TDM_CAP_DAI("Quinary", 2, QUINARY_TDM_TX_2),
+ Q6AFE_TDM_CAP_DAI("Quinary", 3, QUINARY_TDM_TX_3),
+ Q6AFE_TDM_CAP_DAI("Quinary", 4, QUINARY_TDM_TX_4),
+ Q6AFE_TDM_CAP_DAI("Quinary", 5, QUINARY_TDM_TX_5),
+ Q6AFE_TDM_CAP_DAI("Quinary", 6, QUINARY_TDM_TX_6),
+ Q6AFE_TDM_CAP_DAI("Quinary", 7, QUINARY_TDM_TX_7),
+>>>>>>>
+};
+
+static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ int id = args->args[0];
+<<<<<<<
+ int i, ret = -EINVAL;
+=======
+ int ret = -EINVAL;
+ int i;
+>>>>>>>
+
+ for (i = 0; i < ARRAY_SIZE(q6afe_dais); i++) {
+ if (q6afe_dais[i].id == id) {
+ *dai_name = q6afe_dais[i].name;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
+ SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+ "Secondary MI2S Playback SD1",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+ 0, 0, 0, 0),
+<<<<<<<
+=======
+
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+ 0, 0, 0, 0),
+
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+ 0, 0, 0, 0),
+>>>>>>>
+};
+
+static const struct snd_soc_component_driver q6afe_dai_component = {
+ .name = "q6afe-dai-component",
+ .dapm_widgets = q6afe_dai_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(q6afe_dai_widgets),
+<<<<<<<
+=======
+ .controls = q6afe_config_controls,
+ .num_controls = ARRAY_SIZE(q6afe_config_controls),
+>>>>>>>
+ .dapm_routes = q6afe_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(q6afe_dapm_routes),
+ .of_xlate_dai_name = q6afe_of_xlate_dai_name,
+
+};
+
+<<<<<<<
+int q6afe_dai_dev_probe(struct device *dev)
+{
+	struct q6afe_dai_data *dai_data;
+
+	dai_data = devm_kzalloc(dev, sizeof(*dai_data), GFP_KERNEL);
+	if (!dai_data)
+		return -ENOMEM;
+
+ q6afe_set_dai_data(dev, dai_data);
+
+ return devm_snd_soc_register_component(dev, &q6afe_dai_component,
+ q6afe_dais, ARRAY_SIZE(q6afe_dais));
+}
+EXPORT_SYMBOL_GPL(q6afe_dai_dev_probe);
+
+int q6afe_dai_dev_remove(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6afe_dai_dev_remove);
+=======
+static void of_q6afe_parse_dai_data(struct device *dev,
+ struct q6afe_dai_data *data)
+{
+ struct device_node *node;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ unsigned int lines[Q6AFE_MAX_MI2S_LINES];
+ struct q6afe_dai_priv_data *priv;
+ int id, i, num_lines;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id > AFE_PORT_MAX) {
+ dev_err(dev, "valid dai id not found:%d\n", ret);
+ continue;
+ }
+
+ switch (id) {
+ /* MI2S specific properties */
+ case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
+ priv = &data->priv[id];
+ ret = of_property_read_variable_u32_array(node,
+ "qcom,sd-lines",
+ lines, 0,
+ Q6AFE_MAX_MI2S_LINES);
+ if (ret < 0)
+ num_lines = 0;
+ else
+ num_lines = ret;
+
+ priv->sd_line_mask = 0;
+
+ for (i = 0; i < num_lines; i++)
+ priv->sd_line_mask |= BIT(lines[i]);
+
+ break;
+ case PRIMARY_TDM_RX_0 ... QUINARY_TDM_TX_7:
+ priv = &data->priv[id];
+ ret = of_property_read_u32(node, "qcom,tdm-sync-mode",
+ &priv->sync_mode);
+ if (ret) {
+ dev_err(dev, "No Sync mode from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-sync-src",
+ &priv->sync_src);
+ if (ret) {
+ dev_err(dev, "No Sync Src from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-out",
+ &priv->data_out_enable);
+ if (ret) {
+ dev_err(dev, "No Data out enable from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-invert-sync",
+ &priv->invert_sync);
+ if (ret) {
+ dev_err(dev, "No Invert sync from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-delay",
+ &priv->data_delay);
+ if (ret) {
+ dev_err(dev, "No Data Delay from DT\n");
+ break;
+ }
+ ret = of_property_read_u32(node, "qcom,tdm-data-align",
+ &priv->data_align);
+ if (ret) {
+ dev_err(dev, "No Data align from DT\n");
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int q6afe_dai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct q6afe_dai_data *dai_data;
+
+ dai_data = kzalloc(sizeof(*dai_data), GFP_KERNEL);
+ if (!dai_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dai_data);
+
+ of_q6afe_parse_dai_data(dev, dai_data);
+
+ return snd_soc_register_component(dev, &q6afe_dai_component,
+ q6afe_dais, ARRAY_SIZE(q6afe_dais));
+}
+
+static void q6afe_dai_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct q6afe_dai_data *dai_data = dev_get_drvdata(dev);
+
+ snd_soc_unregister_component(dev);
+ kfree(dai_data);
+}
+
+static const struct component_ops q6afe_dai_comp_ops = {
+ .bind = q6afe_dai_bind,
+ .unbind = q6afe_dai_unbind,
+};
+
+static int q6afe_dai_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &q6afe_dai_comp_ops);
+}
+
+static int q6afe_dai_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &q6afe_dai_comp_ops);
+ return 0;
+}
+
+static struct platform_driver q6afe_dai_platform_driver = {
+ .driver = {
+ .name = "q6afe-dai",
+ },
+ .probe = q6afe_dai_dev_probe,
+ .remove = q6afe_dai_dev_remove,
+};
+module_platform_driver(q6afe_dai_platform_driver);
+
+>>>>>>>
+MODULE_DESCRIPTION("Q6 Audio Frontend DAI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/dfb0515b85f9029dc7d918f636a84498bb3f477b/preimage b/rr-cache/dfb0515b85f9029dc7d918f636a84498bb3f477b/preimage
new file mode 100644
index 0000000..3b7e9db
--- /dev/null
+++ b/rr-cache/dfb0515b85f9029dc7d918f636a84498bb3f477b/preimage
@@ -0,0 +1,1384 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8996";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mba_region: mba@91500000 {
+ reg = <0x0 0x91500000 0x0 0x200000>;
+ no-map;
+ };
+
+ slpi_region: slpi@90b00000 {
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
+
+ venus_region: venus@90400000 {
+ reg = <0x0 0x90400000 0x0 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@8ea00000 {
+ reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ no-map;
+ };
+
+ mpss_region: mpss@88800000 {
+ reg = <0x0 0x88800000 0x0 0x6200000>;
+ no-map;
+ };
+
+ smem_mem: smem-mem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ };
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+
+ size = <0x0 0x200000>;
+ alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-msm8996";
+
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s11: s11 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+
+ };
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ tcsr_mutex_regs: syscon@740000 {
+ compatible = "syscon";
+ reg = <0x740000 0x20000>;
+ };
+
+ tcsr: syscon@7a0000 {
+ compatible = "qcom,tcsr-msm8996", "syscon";
+ reg = <0x7a0000 0x18000>;
+ };
+
+ intc: interrupt-controller@9bc0000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+ reg = <0x09bc0000 0x10000>,
+ <0x09c00000 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ apcs: syscon@9820000 {
+ compatible = "syscon";
+ reg = <0x9820000 0x1000>;
+ };
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ gcc: clock-controller@300000 {
+ compatible = "qcom,gcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x300000 0x90000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,apcc-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ };
+
+ blsp1_spi0: spi@7575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@75b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 0>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ blsp2_uart1: serial@75b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x75b0000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@75b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 0>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@7577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 0>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@75ba000{
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
+ compatible = "qcom,msm8996-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@9840000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x09840000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@9850000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09850000 0x1000>,
+ <0x09860000 0x1000>;
+ };
+
+ frame@9870000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09870000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9880000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09880000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@9890000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x09890000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98a0000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098a0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98b0000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098b0000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@98c0000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x098c0000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ spmi_bus: qcom,spmi@400f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x400f000 0x1000>,
+ <0x4400000 0x800000>,
+ <0x4c00000 0x800000>,
+ <0x5800000 0x200000>,
+ <0x400a000 0x002100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ mmcc: clock-controller@8c0000 {
+ compatible = "qcom,mmcc-msm8996";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x8c0000 0x40000>;
+ assigned-clocks = <&mmcc MMPLL9_PLL>,
+ <&mmcc MMPLL1_PLL>,
+ <&mmcc MMPLL3_PLL>,
+ <&mmcc MMPLL4_PLL>,
+ <&mmcc MMPLL5_PLL>;
+ assigned-clock-rates = <624000000>,
+ <810000000>,
+ <980000000>,
+ <960000000>,
+ <825000000>;
+ };
+
+ qfprom@74000 {
+ compatible = "qcom,qfprom";
+ reg = <0x74000 0x8ff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2p_hstx_trim: hstx_trim@24e {
+ reg = <0x24e 0x2>;
+ bits = <5 4>;
+ };
+
+ qusb2s_hstx_trim: hstx_trim@24f {
+ reg = <0x24f 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+ status = "disabled";
+
+ pciephy_0: lane@35000 {
+ reg = <0x035000 0x130>,
+ <0x035200 0x200>,
+ <0x035400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ reg = <0x036000 0x130>,
+ <0x036200 0x200>,
+ <0x036400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_1_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe1";
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "lane1";
+ };
+
+ pciephy_2: lane@37000 {
+ reg = <0x037000 0x130>,
+ <0x037200 0x200>,
+ <0x037400 0x1dc>;
+ #phy-cells = <0>;
+
+ clock-output-names = "pcie_2_pipe_clk_src";
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe2";
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "lane2";
+ };
+ };
+
+ phy@7410000 {
+ compatible = "qcom,msm8996-qmp-usb3-phy";
+ reg = <0x7410000 0x1c4>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+ status = "disabled";
+
+ ssusb_phy_0: lane@7410200 {
+ reg = <0x7410200 0x200>,
+ <0x7410400 0x130>,
+ <0x7410600 0x1a8>;
+ #phy-cells = <0>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ };
+ };
+
+ hsusb_phy1: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ status = "disabled";
+ };
+
+ hsusb_phy2: phy@7412000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7412000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX2_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ status = "disabled";
+ };
+
+ usb2: usb@7600000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <60000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@7600000 {
+ compatible = "snps,dwc3";
+ reg = <0x7600000 0xcc00>;
+ interrupts = <0 138 0>;
+ phys = <&hsusb_phy2>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb3: usb@6a00000 {
+ compatible = "qcom,dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ power-domains = <&gcc USB30_GDSC>;
+ status = "disabled";
+
+ dwc3@6a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x6a00000 0xcc00>;
+ interrupts = <0 131 0>;
+ phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ agnoc@0 {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pcie0: qcom,pcie@600000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ status = "disabled";
+ power-domains = <&gcc PCIE0_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ reg = <0x00600000 0x2000>,
+ <0x0c000000 0xf1d>,
+ <0x0c000f20 0xa8>,
+ <0x0c100000 0x100000>;
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_0>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 405 IRQ_TYPE_NONE>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+
+ };
+
+ pcie1: qcom,pcie@608000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE1_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ status = "disabled";
+
+ reg = <0x00608000 0x2000>,
+ <0x0d000000 0xf1d>,
+ <0x0d000f20 0xa8>,
+ <0x0d100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_1>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 413 IRQ_TYPE_NONE>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+ pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+
+ vdda-supply = <&pm8994_l28>;
+ linux,pci-domain = <1>;
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+
+ pcie2: qcom,pcie@610000 {
+ compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+ power-domains = <&gcc PCIE2_GDSC>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ status = "disabled";
+ reg = <0x00610000 0x2000>,
+ <0x0e000000 0xf1d>,
+ <0x0e000f20 0xa8>,
+ <0x0e100000 0x100000>;
+
+ reg-names = "parf", "dbi", "elbi","config";
+
+ phys = <&pciephy_2>;
+ phy-names = "pciephy";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+ device_type = "pci";
+
+ interrupts = <GIC_SPI 421 IRQ_TYPE_NONE>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+ pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep >;
+
+ vdda-supply = <&pm8994_l28>;
+
+ linux,pci-domain = <2>;
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave";
+ };
+ };
+<<<<<<<
+=======
+
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 334 0>,
+ <0 329 0>,
+ <0 330 0>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "okay";
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 0>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+			/* This is a safe speed for bring-up in all bin levels.
+			 * It isn't the fastest the chip can go, but we can
+			 * get there eventually. */
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <510000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ status = "okay";
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ #sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+
+ lpass_q6_smmu: arm,smmu-lpass_q6@1600000 {
+ compatible = "qcom,msm8996-smmu-v2";
+ reg = <0x1600000 0x20000>;
+ #iommu-cells = <1>;
+ power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
+ <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "iface", "bus";
+ status = "okay";
+ };
+>>>>>>>
+ };
+
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ label = "lpass";
+ qcom,ipc = <&apcs 16 8>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <2>;
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ qcom,apr-dest-domain-id = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ qcom,apr-svc-name = "CORE";
+ qcom,apr-svc-id = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ qcom,apr-svc-name = "AFE";
+ qcom,apr-svc-id = <APR_SVC_AFE>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ qcom,apr-svc-name = "ASM";
+ qcom,apr-svc-id = <APR_SVC_ASM>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ qcom,apr-svc-name = "ADM";
+ qcom,apr-svc-id = <APR_SVC_ADM>;
+ #sound-dai-cells = <0>;
+ };
+
+ };
+ };
+ };
+
+ adsp-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ modem-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+};
+#include "msm8996-pins.dtsi"
diff --git a/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/postimage b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/postimage
new file mode 100644
index 0000000..e2d3892
--- /dev/null
+++ b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/postimage
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) virtual ports IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+
diff --git a/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/preimage b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/preimage
new file mode 100644
index 0000000..e4279d9
--- /dev/null
+++ b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/preimage
@@ -0,0 +1,144 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) virtual ports IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+=======
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) Ports */
+#define AFE_PORT_HDMI_RX 8
+#define SLIMBUS_0_RX 15
+#define SLIMBUS_0_TX 16
+#define SLIMBUS_1_RX 17
+#define SLIMBUS_1_TX 18
+#define SLIMBUS_2_RX 19
+#define SLIMBUS_2_TX 20
+#define SLIMBUS_3_RX 21
+#define SLIMBUS_3_TX 22
+#define SLIMBUS_4_RX 23
+#define SLIMBUS_4_TX 24
+#define SLIMBUS_5_RX 25
+#define SLIMBUS_5_TX 26
+#define QUATERNARY_MI2S_RX 34
+#define QUATERNARY_MI2S_TX 35
+#define SECONDARY_MI2S_RX 36
+#define SECONDARY_MI2S_TX 37
+#define TERTIARY_MI2S_RX 38
+#define TERTIARY_MI2S_TX 39
+#define PRIMARY_MI2S_RX 40
+#define PRIMARY_MI2S_TX 41
+#define SECONDARY_PCM_RX 42
+#define SECONDARY_PCM_TX 43
+#define SLIMBUS_6_RX 45
+#define SLIMBUS_6_TX 46
+>>>>>>>
+
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+
diff --git a/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/thisimage b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/thisimage
new file mode 100644
index 0000000..e4279d9
--- /dev/null
+++ b/rr-cache/e0eebca18d94a992b7afdfbb051d272595421e25/thisimage
@@ -0,0 +1,144 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) virtual ports IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+=======
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __DT_BINDINGS_Q6_AFE_H__
+#define __DT_BINDINGS_Q6_AFE_H__
+
+/* Audio Front End (AFE) Ports */
+#define AFE_PORT_HDMI_RX 8
+#define SLIMBUS_0_RX 15
+#define SLIMBUS_0_TX 16
+#define SLIMBUS_1_RX 17
+#define SLIMBUS_1_TX 18
+#define SLIMBUS_2_RX 19
+#define SLIMBUS_2_TX 20
+#define SLIMBUS_3_RX 21
+#define SLIMBUS_3_TX 22
+#define SLIMBUS_4_RX 23
+#define SLIMBUS_4_TX 24
+#define SLIMBUS_5_RX 25
+#define SLIMBUS_5_TX 26
+#define QUATERNARY_MI2S_RX 34
+#define QUATERNARY_MI2S_TX 35
+#define SECONDARY_MI2S_RX 36
+#define SECONDARY_MI2S_TX 37
+#define TERTIARY_MI2S_RX 38
+#define TERTIARY_MI2S_TX 39
+#define PRIMARY_MI2S_RX 40
+#define PRIMARY_MI2S_TX 41
+#define SECONDARY_PCM_RX 42
+#define SECONDARY_PCM_TX 43
+#define SLIMBUS_6_RX 45
+#define SLIMBUS_6_TX 46
+>>>>>>>
+
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+
diff --git a/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/postimage b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/postimage
new file mode 100644
index 0000000..36caab2
--- /dev/null
+++ b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/postimage
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
+#define _DT_BINDINGS_CLK_MSM_RPMH_H
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK 0
+#define RPMH_CXO_CLK_A 1
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+#define RPMH_RF_CLK4 12
+#define RPMH_RF_CLK4_A 13
+
+#endif
diff --git a/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/preimage b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/preimage
new file mode 100644
index 0000000..9f3efcd
--- /dev/null
+++ b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/preimage
@@ -0,0 +1,34 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+=======
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+>>>>>>>
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
+#define _DT_BINDINGS_CLK_MSM_RPMH_H
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK 0
+#define RPMH_CXO_CLK_A 1
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+<<<<<<<
+=======
+#define RPMH_RF_CLK4 12
+#define RPMH_RF_CLK4_A 13
+>>>>>>>
+
+#endif
diff --git a/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/thisimage b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/thisimage
new file mode 100644
index 0000000..9f3efcd
--- /dev/null
+++ b/rr-cache/e1c2f3a67c9cac9e94ff139d86cfbac0aec953eb/thisimage
@@ -0,0 +1,34 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+=======
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+>>>>>>>
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
+#define _DT_BINDINGS_CLK_MSM_RPMH_H
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK 0
+#define RPMH_CXO_CLK_A 1
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+<<<<<<<
+=======
+#define RPMH_RF_CLK4 12
+#define RPMH_RF_CLK4_A 13
+>>>>>>>
+
+#endif
diff --git a/rr-cache/e21fcfcfd5d7e0b79ef526b7b9fb02629f206140/preimage b/rr-cache/e21fcfcfd5d7e0b79ef526b7b9fb02629f206140/preimage
new file mode 100644
index 0000000..c01828c
--- /dev/null
+++ b/rr-cache/e21fcfcfd5d7e0b79ef526b7b9fb02629f206140/preimage
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <soc/qcom/cmd-db.h>
+
+#define NUM_PRIORITY 2
+#define MAX_SLV_ID 8
+<<<<<<<
+#define CMD_DB_MAGIC 0x0C0330DBUL
+#define SLAVE_ID_MASK 0x7
+#define SLAVE_ID_SHIFT 16
+
+#define ENTRY_HEADER(hdr) ((void *)cmd_db_header + \
+ sizeof(*cmd_db_header) + \
+ hdr->header_offset)
+
+#define RSC_OFFSET(hdr, ent) ((void *)cmd_db_header + \
+ sizeof(*cmd_db_header) + \
+ hdr.data_offset + ent.offset)
+
+=======
+#define SLAVE_ID_MASK 0x7
+#define SLAVE_ID_SHIFT 16
+
+>>>>>>>
+/**
+ * struct entry_header: header for each entry in cmddb
+ *
+ * @id: resource's identifier
+ * @priority: unused
+ * @addr: the address of the resource
+ * @len: length of the data
+ * @offset: offset from :@data_offset, start of the data
+ */
+struct entry_header {
+<<<<<<<
+ u64 id;
+ u32 priority[NUM_PRIORITY];
+ u32 addr;
+ u16 len;
+ u16 offset;
+=======
+ u8 id[8];
+ __le32 priority[NUM_PRIORITY];
+ __le32 addr;
+ __le16 len;
+ __le16 offset;
+>>>>>>>
+};
+
+/**
+ * struct rsc_hdr: resource header information
+ *
+ * @slv_id: id for the resource
+ * @header_offset: entry's header at offset from the end of the cmd_db_header
+ * @data_offset: entry's data at offset from the end of the cmd_db_header
+ * @cnt: number of entries for HW type
+ * @version: MSB is major, LSB is minor
+ * @reserved: reserved for future use.
+ */
+struct rsc_hdr {
+<<<<<<<
+ __le16 slv_id;
+ __le16 header_offset;
+ __le16 data_offset;
+ __le16 cnt;
+ __le16 version;
+ __le16 reserved[3];
+=======
+ u16 slv_id;
+ u16 header_offset;
+ u16 data_offset;
+ u16 cnt;
+ u16 version;
+ u16 reserved[3];
+>>>>>>>
+};
+
+/**
+ * struct cmd_db_header: The DB header information
+ *
+ * @version: The cmd db version
+<<<<<<<
+ * @magic: constant expected in the database
+=======
+ * @magic_number: constant expected in the database
+>>>>>>>
+ * @header: array of resources
+ * @checksum: checksum for the header. Unused.
+ * @reserved: reserved memory
+ * @data: driver specific data
+ */
+struct cmd_db_header {
+<<<<<<<
+ __le32 version;
+ u8 magic[4];
+ struct rsc_hdr header[MAX_SLV_ID];
+ __le32 checksum;
+ __le32 reserved;
+=======
+ u32 version;
+ u32 magic_num;
+ struct rsc_hdr header[MAX_SLV_ID];
+ u32 checksum;
+ u32 reserved;
+>>>>>>>
+ u8 data[];
+};
+
+/**
+ * DOC: Description of the Command DB database.
+ *
+ * At the start of the command DB memory is the cmd_db_header structure.
+ * The cmd_db_header holds the version, checksum, magic key as well as an
+ * array for header for each slave (depicted by the rsc_header). Each h/w
+ * based accelerator is a 'slave' (shared resource) and has slave id indicating
+ * the type of accelerator. The rsc_header is the header for such individual
+ * slaves of a given type. The entries for each of these slaves begin at the
+ * rsc_hdr.header_offset. In addition each slave could have auxiliary data
+ * that may be needed by the driver. The data for the slave starts at the
+ * entry_header.offset to the location pointed to by the rsc_hdr.data_offset.
+ *
+ * Drivers have a stringified key to a slave/resource. They can query the slave
+ * information and get the slave id and the auxiliary data and the length of the
+ * data. Using this information, they can format the request to be sent to the
+ * h/w accelerator and request a resource state.
+ */
+
+<<<<<<<
+static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
+
+static bool cmd_db_magic_matches(const struct cmd_db_header *header)
+{
+ const u8 *magic = header->magic;
+
+ return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
+}
+
+static struct cmd_db_header *cmd_db_header;
+
+
+static inline void *rsc_to_entry_header(struct rsc_hdr *hdr)
+{
+ u16 offset = le16_to_cpu(hdr->header_offset);
+
+ return cmd_db_header->data + offset;
+}
+
+static inline void *
+rsc_offset(struct rsc_hdr *hdr, struct entry_header *ent)
+{
+ u16 offset = le16_to_cpu(hdr->data_offset);
+ u16 loffset = le16_to_cpu(ent->offset);
+
+ return cmd_db_header->data + offset + loffset;
+}
+
+=======
+static struct cmd_db_header *cmd_db_header;
+
+>>>>>>>
+/**
+ * cmd_db_ready - Indicates if command DB is available
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int cmd_db_ready(void)
+{
+ if (cmd_db_header == NULL)
+ return -EPROBE_DEFER;
+<<<<<<<
+ else if (!cmd_db_magic_matches(cmd_db_header))
+=======
+ else if (cmd_db_header->magic_num != CMD_DB_MAGIC)
+>>>>>>>
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmd_db_ready);
+
+<<<<<<<
+static int cmd_db_get_header(const char *id, struct entry_header *eh,
+=======
+static u64 cmd_db_get_u64_id(const char *id)
+{
+ u64 rsc_id = 0;
+ u8 *ch = (u8 *)&rsc_id;
+
+ strncpy(ch, id, min(strlen(id), sizeof(rsc_id)));
+
+ return rsc_id;
+}
+
+static int cmd_db_get_header(u64 query, struct entry_header *eh,
+>>>>>>>
+ struct rsc_hdr *rh)
+{
+ struct rsc_hdr *rsc_hdr;
+ struct entry_header *ent;
+ int ret, i, j;
+<<<<<<<
+=======
+ u8 query[8];
+>>>>>>>
+
+ ret = cmd_db_ready();
+ if (ret)
+ return ret;
+
+ if (!eh || !rh)
+ return -EINVAL;
+
+<<<<<<<
+=======
+ /* Pad out query string to same length as in DB */
+ strncpy(query, id, sizeof(query));
+
+>>>>>>>
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc_hdr = &cmd_db_header->header[i];
+ if (!rsc_hdr->slv_id)
+ break;
+
+<<<<<<<
+ ent = ENTRY_HEADER(rsc_hdr);
+ for (j = 0; j < rsc_hdr->cnt; j++, ent++) {
+ if (ent->id == query)
+ break;
+ }
+
+ if (j < rsc_hdr->cnt) {
+=======
+ ent = rsc_to_entry_header(rsc_hdr);
+ for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
+ if (memcmp(ent->id, query, sizeof(ent->id)) == 0)
+ break;
+ }
+
+ if (j < le16_to_cpu(rsc_hdr->cnt)) {
+>>>>>>>
+ memcpy(eh, ent, sizeof(*ent));
+ memcpy(rh, rsc_hdr, sizeof(*rh));
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+<<<<<<<
+=======
+static int cmd_db_get_header_by_rsc_id(const char *id,
+ struct entry_header *ent_hdr,
+ struct rsc_hdr *rsc_hdr)
+{
+ u64 rsc_id = cmd_db_get_u64_id(id);
+
+ return cmd_db_get_header(rsc_id, ent_hdr, rsc_hdr);
+}
+
+>>>>>>>
+/**
+ * cmd_db_read_addr() - Query command db for resource id address.
+ *
+ * @id: resource id to query for address
+ *
+ * Return: resource address on success, 0 on error
+ *
+ * This is used to retrieve resource address based on resource
+ * id.
+ */
+u32 cmd_db_read_addr(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+
+<<<<<<<
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : le32_to_cpu(ent.addr);
+=======
+ ret = cmd_db_get_header_by_rsc_id(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : ent.addr;
+>>>>>>>
+}
+EXPORT_SYMBOL(cmd_db_read_addr);
+
+/**
+ * cmd_db_read_aux_data() - Query command db for aux data.
+ *
+ * @id: Resource to retrieve AUX Data on.
+ * @data: Data buffer to copy returned aux data to; must not be NULL
+ * @len: Caller provides size of data buffer passed in.
+ *
+ * Return: size of data on success, errno otherwise
+ */
+int cmd_db_read_aux_data(const char *id, u8 *data, size_t len)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+<<<<<<<
+=======
+ u16 ent_len;
+>>>>>>>
+
+ if (!data)
+ return -EINVAL;
+
+<<<<<<<
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret)
+ return ret;
+
+ ent_len = le16_to_cpu(ent.len);
+ if (len < ent_len)
+ return -EINVAL;
+
+ len = min_t(u16, ent_len, len);
+ memcpy(data, rsc_offset(&rsc_hdr, &ent), len);
+=======
+ ret = cmd_db_get_header_by_rsc_id(id, &ent, &rsc_hdr);
+ if (ret)
+ return ret;
+
+ if (len < ent.len)
+ return -EINVAL;
+
+ len = min_t(u16, ent.len, len);
+ memcpy(data, RSC_OFFSET(rsc_hdr, ent), len);
+>>>>>>>
+
+ return len;
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_read_aux_data_len - Get the length of the auxiliary data stored in DB.
+ *
+ * @id: Resource to retrieve AUX Data.
+ *
+ * Return: size on success, 0 on error
+ */
+size_t cmd_db_read_aux_data_len(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+
+<<<<<<<
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : le16_to_cpu(ent.len);
+=======
+ ret = cmd_db_get_header_by_rsc_id(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : ent.len;
+>>>>>>>
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data_len);
+
+/**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource address
+ *
+ * @id: Resource id to query the DB for version
+ *
+ * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
+ */
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+<<<<<<<
+ u32 addr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret < 0)
+ return CMD_DB_HW_INVALID;
+
+ addr = le32_to_cpu(ent.addr);
+ return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+=======
+
+ ret = cmd_db_get_header_by_rsc_id(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? CMD_DB_HW_INVALID :
+ (ent.addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+>>>>>>>
+}
+EXPORT_SYMBOL(cmd_db_read_slave_id);
+
+static int cmd_db_dev_probe(struct platform_device *pdev)
+{
+ struct reserved_mem *rmem;
+ int ret = 0;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (IS_ERR_OR_NULL(cmd_db_header)) {
+ ret = PTR_ERR(cmd_db_header);
+ cmd_db_header = NULL;
+ return ret;
+ }
+
+<<<<<<<
+ if (!cmd_db_magic_matches(cmd_db_header)) {
+=======
+ if (cmd_db_header->magic_num != CMD_DB_MAGIC) {
+>>>>>>>
+ dev_err(&pdev->dev, "Invalid Command DB Magic\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cmd_db_match_table[] = {
+ { .compatible = "qcom,cmd-db" },
+ { },
+};
+
+static struct platform_driver cmd_db_dev_driver = {
+ .probe = cmd_db_dev_probe,
+ .driver = {
+ .name = "cmd-db",
+ .of_match_table = cmd_db_match_table,
+ },
+};
+
+static int __init cmd_db_device_init(void)
+{
+ return platform_driver_register(&cmd_db_dev_driver);
+}
+arch_initcall(cmd_db_device_init);
diff --git a/rr-cache/e49ddc0a8632bffe1c6ec9e189a142814e970dd0/preimage b/rr-cache/e49ddc0a8632bffe1c6ec9e189a142814e970dd0/preimage
new file mode 100644
index 0000000..6a0dc18
--- /dev/null
+++ b/rr-cache/e49ddc0a8632bffe1c6ec9e189a142814e970dd0/preimage
@@ -0,0 +1,8153 @@
+/*
+ * Universal Flash Storage Host controller driver Core
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#include <linux/async.h>
+#include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+#include "unipro.h"
+#include "ufs-sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#define UFSHCD_REQ_SENSE_SIZE 18
+
+#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
+ UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK)
+/* UIC command timeout, unit: ms */
+#define UIC_CMD_TIMEOUT 500
+
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 3
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
+#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
+ ({ \
+ int _ret; \
+ if (_on) \
+ _ret = ufshcd_enable_vreg(_dev, _vreg); \
+ else \
+ _ret = ufshcd_disable_vreg(_dev, _vreg); \
+ _ret; \
+ })
+
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
+};
+
+/* UFSHCD error handling flags */
+enum {
+ UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+ UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
+};
+
+#define ufshcd_set_eh_in_progress(h) \
+ ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+ ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+ ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].link_state;
+}
+
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ enum uic_link_state link_state)
+{
+ enum ufs_pm_level lvl;
+
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+ if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+ (ufs_pm_lvl_states[lvl].link_state == link_state))
+ return lvl;
+ }
+
+ /* if no match found, return the level 0 */
+ return UFS_PM_LVL_0;
+}
+
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+ return tag >= 0 && tag < hba->nutrs;
+}
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->is_irq_enabled) {
+ ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+ hba);
+ if (ret)
+ dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+ __func__, ret);
+ hba->is_irq_enabled = true;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+ if (hba->is_irq_enabled) {
+ free_irq(hba->irq, hba);
+ hba->is_irq_enabled = false;
+ }
+}
+
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+ scsi_block_requests(hba->host);
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+ if (!val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_task_req_desc *descp;
+ struct utp_upiu_task_req *task_req;
+ int off = (int)tag - hba->nutrs;
+
+ descp = &hba->utmrdl_base_addr[off];
+ task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+ &task_req->input_param1);
+}
+
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+ sector_t lba = -1;
+ u8 opcode = 0;
+ u32 intr, doorbell;
+ struct ufshcd_lrb *lrbp;
+ int transfer_len = -1;
+
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
+ if (!trace_ufshcd_command_enabled())
+ return;
+
+ lrbp = &hba->lrb[tag];
+
+ if (lrbp->cmd) { /* data phase exists */
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba =
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+
+ intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ doorbell, transfer_len, intr, lba, opcode);
+}
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+ clki->max_freq)
+ dev_err(hba->dev, "clk: %s, rate: %u\n",
+ clki->name, clki->curr_freq);
+ }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+ struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+ int i;
+
+ for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+ int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+ if (err_hist->reg[p] == 0)
+ continue;
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+ err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ }
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ /*
+ * hex_dump reads its data without the readl macro. This might
+ * cause inconsistency issues on some platforms, as the printed
+ * values may be from the cache and not the most recent value.
+ * To know whether you are looking at an un-cached version, verify
+ * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
+ * invoked during the platform/pci probe function.
+ */
+ ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
+ hba->ufs_version, hba->capabilities);
+ dev_err(hba->dev,
+ "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
+ (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+ ufshcd_print_clk_freqs(hba);
+
+ if (hba->vops && hba->vops->dbg_register_dump)
+ hba->vops->dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+ struct ufshcd_lrb *lrbp;
+ int prdt_length;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
+ tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
+ tag, ktime_to_us(lrbp->compl_time_stamp));
+ dev_err(hba->dev,
+ "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
+ tag, (u64)lrbp->utrd_dma_addr);
+
+ ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+ sizeof(struct utp_transfer_req_desc));
+ dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_req_dma_addr);
+ ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_rsp_dma_addr);
+ ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+ sizeof(struct utp_upiu_rsp));
+
+ prdt_length = le16_to_cpu(
+ lrbp->utr_descriptor_ptr->prd_table_length);
+ dev_err(hba->dev,
+ "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
+ tag, prdt_length,
+ (u64)lrbp->ucd_prdt_dma_addr);
+
+ if (pr_prdt)
+ ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+ sizeof(struct ufshcd_sg_entry) * prdt_length);
+ }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct utp_task_req_desc *tmrdp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+ tmrdp = &hba->utmrdl_base_addr[tag];
+ dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
+ ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+ sizeof(struct request_desc_header));
+ dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+ sizeof(struct utp_task_req_desc));
+ }
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+ dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+ dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ hba->saved_err, hba->saved_uic_err);
+ dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+ hba->pm_op_in_progress, hba->is_sys_suspended);
+ dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+ hba->auto_bkops_enabled, hba->host->host_self_blocked);
+ dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+ hba->eh_flags, hba->req_abort_count);
+ dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+ hba->capabilities, hba->caps);
+ dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+ hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ static const char * const names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ __func__,
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ if (can_sleep)
+ usleep_range(interval_us, interval_us + 50);
+ else
+ udelay(interval_us);
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ * @hba: Pointer to adapter instance
+ *
+ * Returns interrupt bit mask per version
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ break;
+ }
+
+ return intr_mask;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
+ return ufshcd_vops_get_ufs_hci_version(hba);
+
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ * the host controller
+ * @hba: pointer to adapter instance
+ *
+ * Returns true if device present, false if no device detected
+ */
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? true : false;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
+ *
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
+ */
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+{
+ int tag;
+ bool ret = false;
+
+ if (!free_slot)
+ goto out;
+
+ do {
+ tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+ if (tag >= hba->nutmrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+ *free_slot = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+ clear_bit_unlock(slot, &hba->tm_slots_in_use);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos),
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument3
+ * Returns 0 on success, non zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ * When run-stop registers are set to 1, it indicates to the
+ * host controller that it can process requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP);
+ ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns false if controller is active, true otherwise
+ */
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+ ? false : true;
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+ /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ return UFS_UNIPRO_VER_1_41;
+ else
+ return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+ /*
+ * If both host and device support UniPro ver1.6 or later, PA layer
+ * parameters tuning happens during link startup itself.
+ *
+ * We can manually tune PA layer parameters if either host or device
+ * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+ * logic simple, we will only do manual tuning if local unipro version
+ * doesn't support ver1.6 or later.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+ return true;
+ else
+ return false;
+}
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled up", clki->name,
+ clki->curr_freq,
+ clki->max_freq);
+
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled down", clki->name,
+ clki->curr_freq,
+ clki->min_freq);
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+ bool scale_up)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ return false;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ return true;
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+ u64 wait_timeout_us)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 tm_doorbell;
+ u32 tr_doorbell;
+ bool timeout = false, do_last_check = false;
+ ktime_t start;
+
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Wait for all the outstanding tasks/transfer requests.
+ * Verify by checking the doorbell registers are clear.
+ */
+ start = ktime_get();
+ do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!tm_doorbell && !tr_doorbell) {
+ timeout = false;
+ break;
+ } else if (do_last_check) {
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+ wait_timeout_us) {
+ timeout = true;
+ /*
+ * We might have scheduled out for long time so make
+ * sure to check if doorbells are cleared by this time
+ * or not.
+ */
+ do_last_check = true;
+ }
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (tm_doorbell || tr_doorbell);
+
+ if (timeout) {
+ dev_err(hba->dev,
+ "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+ __func__, tm_doorbell, tr_doorbell);
+ ret = -EBUSY;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+ #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
+ int ret = 0;
+ struct ufs_pa_layer_attr new_pwr_info;
+
+ if (scale_up) {
+ memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+ sizeof(struct ufs_pa_layer_attr));
+ } else {
+ memcpy(&new_pwr_info, &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
+ || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ /* save the current power mode */
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ /* scale down gear */
+ new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ }
+ }
+
+ /* check if the power mode needs to be changed or not? */
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+ if (ret)
+ dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+ __func__, ret,
+ hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+ new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+ return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+ #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+ int ret = 0;
+ /*
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->clk_scaling_lock);
+ if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+ }
+
+ return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+ return ret;
+
+ /* scale down the gear before scaling down clocks */
+ if (!scale_up) {
+ ret = ufshcd_scale_gear(hba, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_scale_clks(hba, scale_up);
+ if (ret) {
+ if (!scale_up)
+ ufshcd_scale_gear(hba, true);
+ goto out;
+ }
+
+ /* scale up the gear after scaling up clocks */
+ if (scale_up) {
+ ret = ufshcd_scale_gear(hba, true);
+ if (ret) {
+ ufshcd_scale_clks(hba, false);
+ goto out;
+ }
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ ufshcd_clock_scaling_unprepare(hba);
+ ufshcd_release(hba);
+ return ret;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.suspend_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = true;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.resume_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (!hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ devfreq_resume_device(hba->devfreq);
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ ktime_t start;
+ bool scale_up, sched_clk_scaling_suspend_work = false;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ unsigned long irq_flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
+
+ if (list_empty(clk_list)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ goto out;
+ }
+
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ scale_up = (*freq == clki->max_freq) ? true : false;
+ if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ ret = 0;
+ goto out; /* no state change required */
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ start = ktime_get();
+ ret = ufshcd_devfreq_scale(hba, scale_up);
+
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+ if (sched_clk_scaling_suspend_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.suspend_work);
+
+ return ret;
+}
+
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
+static int ufshcd_devfreq_init(struct ufs_hba *hba)
+{
+<<<<<<<
+ struct devfreq *devfreq;
+ int ret;
+
+ devfreq = devm_devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ "simple_ondemand",
+=======
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct devfreq *devfreq;
+ int ret;
+
+ /* Skip devfreq if we don't have any clocks in the list */
+ if (list_empty(clk_list))
+ return 0;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+
+ devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+>>>>>>>
+ NULL);
+ if (IS_ERR(devfreq)) {
+ ret = PTR_ERR(devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
+<<<<<<<
+=======
+
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+>>>>>>>
+ return ret;
+ }
+
+ hba->devfreq = devfreq;
+
+ return 0;
+}
+
+<<<<<<<
+=======
+static void ufshcd_devfreq_remove(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ if (!hba->devfreq)
+ return;
+
+ devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+}
+
+>>>>>>>
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ devfreq_suspend_device(hba->devfreq);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.window_start_t = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool suspend = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (suspend)
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool resume = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (resume)
+ devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_scaling.is_allowed)
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+
+ hba->clk_scaling.is_allowed = value;
+
+ if (value) {
+ ufshcd_resume_clkscaling(hba);
+ } else {
+ ufshcd_suspend_clkscaling(hba);
+ err = ufshcd_devfreq_scale(hba, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ }
+
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return count;
+}
+
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+ hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+ hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+ sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+ hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+ hba->clk_scaling.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
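+
+/*
+ * Illustrative only (not taken from this file): once the attribute above is
+ * registered, clock scaling can typically be toggled from user space through
+ * the host device's sysfs directory, e.g.
+ *
+ *	echo 0 > .../clkscale_enable	(disable clock scaling)
+ *	echo 1 > .../clkscale_enable	(re-enable it)
+ *
+ * The exact sysfs path depends on how the host device is instantiated.
+ */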
+
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.ungate_work);
+
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == CLKS_ON) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_setup_clocks(hba, true);
+
+ /* Exit from hibern8 */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ /* Prevent gating in this path */
+ hba->clk_gating.is_suspended = true;
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ else
+ ufshcd_set_link_active(hba);
+ }
+ hba->clk_gating.is_suspended = false;
+ }
+unblock_reqs:
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
+start:
+ switch (hba->clk_gating.state) {
+ case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we also exit hibern8 state, in addition to
+ * the clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
+ break;
+ case REQ_CLKS_OFF:
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ break;
+ }
+ /*
+ * If we are here, it means gating work is either done or
+ * currently running. Hence, fall through to cancel gating
+ * work and to enable clocks.
+ */
+ case CLKS_OFF:
+ ufshcd_scsi_block_requests(hba);
+ hba->clk_gating.state = REQ_CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_CLKS_ON:
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ /* Make sure state is CLKS_ON before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ __func__, hba->clk_gating.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.gate_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto rel_lock;
+ }
+
+ if (hba->clk_gating.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* put the link into hibern8 mode before turning off clocks */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ if (ufshcd_uic_hibern8_enter(hba)) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case keep the state
+ * as REQ_CLKS_ON, which would anyway imply that clocks are off
+ * and a request to turn them on is pending. Doing it this way
+ * keeps the state machine intact and ultimately avoids running
+ * the cancel work multiple times when new requests arrive
+ * before the current cancel work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.active_reqs--;
+
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba))
+ return;
+
+ hba->clk_gating.state = REQ_CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ schedule_delayed_work(&hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_gating.is_enabled)
+ goto out;
+
+ if (value) {
+ ufshcd_release(hba);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->clk_gating.is_enabled = value;
+out:
+ return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.delay_ms = 150;
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
+
+ hba->clk_gating.is_enabled = true;
+
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+}
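+
+/*
+ * Illustrative only: the two attributes registered above show up under the
+ * host device in sysfs (exact path is platform dependent), e.g.
+ *
+ *	echo 200 > .../clkgate_delay_ms		(gate clocks after 200 ms idle)
+ *	echo 0   > .../clkgate_enable		(keep clocks running)
+ */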
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ cancel_work_sync(&hba->clk_gating.ungate_work);
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+ bool queue_resume_work = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.active_reqs++)
+ queue_resume_work = true;
+
+ if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ return;
+
+ if (queue_resume_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.resume_work);
+
+ if (!hba->clk_scaling.window_start_t) {
+ hba->clk_scaling.window_start_t = jiffies;
+ hba->clk_scaling.tot_busy_t = 0;
+ hba->clk_scaling.is_busy_started = false;
+ }
+
+ if (!hba->clk_scaling.is_busy_started) {
+ hba->clk_scaling.busy_start_t = ktime_get();
+ hba->clk_scaling.is_busy_started = true;
+ }
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_clk_scaling_start_busy(hba);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+ ufshcd_add_command_trace(hba, task_tag, "send");
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
+ }
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static
+int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 resp_len;
+ u16 buf_len;
+
+ /* data segment length */
+ resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+ buf_len = be16_to_cpu(
+ hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_ready_for_uic_cmd - Check if controller is ready
+ * to accept UIC commands
+ * @hba: per adapter instance
+ * Returns true if the controller is ready to accept UIC commands, else false
+ */
+static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+{
+ if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of the HCS register.
+ * Returns the value of the UPMCRS field.
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Mutex must be held.
+ */
+static inline void
+ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ WARN_ON(hba->active_uic_cmd);
+
+ hba->active_uic_cmd = uic_cmd;
+
+ /* Write Args */
+ ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
+ ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
+ ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
+
+ /* Write UIC Cmd */
+ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
+ REG_UIC_COMMAND);
+}
+
+/**
+ * ufshcd_wait_for_uic_cmd - Wait for the completion of a UIC command
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Must be called with mutex held.
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ if (wait_for_completion_timeout(&uic_cmd->done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ else
+ ret = -ETIMEDOUT;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+/**
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
+ *
+ * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be
+ * called with the mutex held and host_lock locked.
+ * Returns 0 only if success.
+ */
+static int
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
+{
+ if (!ufshcd_ready_for_uic_cmd(hba)) {
+ dev_err(hba->dev,
+ "Controller not ready to accept UIC commands\n");
+ return -EIO;
+ }
+
+ if (completion)
+ init_completion(&uic_cmd->done);
+
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+
+ return 0;
+}
+
+/**
+ * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = set & INTERRUPT_MASK_RW_VER_10;
+ set = rw | ((set ^ intrs) & intrs);
+ } else {
+ set |= intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = (set & INTERRUPT_MASK_RW_VER_10) &
+ ~(intrs & INTERRUPT_MASK_RW_VER_10);
+ set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
+
+ } else {
+ set &= ~intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fills the request header
+ * descriptor according to the request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: request's data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
+ /*
+ * Assign an invalid value for the command status. The controller
+ * updates OCS on command completion with the actual command
+ * status.
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
+ * for SCSI commands
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+ /* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+
+ /* Copy the Descriptor */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(descp, query->descriptor, len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
+ * for Device Management Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
+ * for SCSI Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+ return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
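+
+/*
+ * Worked example (for illustration): the RPMB W-LUN is reported by the
+ * device as UFS_UPIU_RPMB_WLUN (0xc4). Clearing the UFS_UPIU_WLUN_ID bit
+ * (0x80) leaves 0x44, and OR-ing in SCSI_W_LUN_BASE (0xc100) yields the
+ * SCSI W-LUN 0xc144.
+ */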
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ if (!down_read_trylock(&hba->clk_scaling_lock))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED:
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+
+ /* if error handling is in progress, don't issue commands */
+ if (ufshcd_eh_in_progress(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ hba->req_abort_count = 0;
+
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * Dev manage command in progress, requeue the command.
+ * Requeuing the command helps in cases where the request *may*
+ * find a different tag instead of waiting for dev manage command
+ * completion.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ err = ufshcd_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+ lrbp = &hba->lrb[tag];
+
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+
+ err = ufshcd_map_sg(hba, lrbp);
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_comp_devman_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * wait for the h/w to clear the corresponding bit in the doorbell.
+ * max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000, true);
+
+ return err;
+}
+
+static int
+ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ err = ufshcd_check_query_response(hba, lrbp);
+ if (!err)
+ err = ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* successfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ /*
+ * in case of an error, after clearing the doorbell,
+ * we also need to clear the outstanding_request
+ * field in hba
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if a free slot is unavailable for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba: UFS hba
+ * @cmd_type: specifies the type (NOP, Query...)
+ * @timeout: timeout in milliseconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * it is expected you hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, tag,
+ err ? "query_complete_err" : "query_complete");
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request, struct ufs_query_res **response,
+ enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
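+
+/*
+ * Illustrative usage (sketch only; the flag name comes from ufs.h and this
+ * is not a call site in this file): polling fDeviceInit could look like
+ *
+ *	bool flag_res = true;
+ *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ */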
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ *attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
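+
+/*
+ * Illustrative usage (sketch only, not a call site in this file): reading
+ * the active ICC level attribute could look like
+ *
+ *	u32 icc_level;
+ *	int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ *				    QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ *				    &icc_level);
+ */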
+
+/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_HEALTH:
+ *desc_len = hba->desc_size.hlth_desc;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 param_offset,
+ u8 *param_read_buf,
+ u8 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ int buff_len;
+ bool is_kmalloc = true;
+
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+ /* Get the max length of descriptor from structure filled up at probe
+ * time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ }
+
+ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
+ goto out;
+ }
+
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure we do not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba,
+ QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ char *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * the descriptor contains string in UTF16 format
+ * we need to convert to utf-8 so it can be displayed
+ */
+ utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+ desc_len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ int lun,
+ enum unit_desc_param param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ /*
+ * Unit descriptors are only available for general purpose LUs (LUN id
+ * from 0 to 7) and RPMB Well known LU.
+ */
+ if (!ufs_is_valid_unit_desc_lun(lun))
+ return -EOPNOTSUPP;
+
+ return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+ param_offset, param_read_buf, param_size);
+}
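+
+/*
+ * Illustrative usage (sketch only): the queue depth of a general purpose
+ * LUN could be read with something like
+ *
+ *	u8 lun_qdepth;
+ *	int ret = ufshcd_read_unit_desc_param(hba, lun,
+ *					      UNIT_DESC_PARAM_LU_Q_DEPTH,
+ *					      &lun_qdepth, sizeof(lun_qdepth));
+ */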
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+ /*
+ * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
+ * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
+ * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
+ * be aligned to 128 bytes as well.
+ */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = devm_kcalloc(hba->dev,
+ hba->nutrs, sizeof(struct ufshcd_lrb),
+ GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+ /* Response upiu and prdt offset should be in double words */
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+ (i * sizeof(struct utp_transfer_req_desc));
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+ response_offset;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+ prdt_offset;
+ }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev,
+ "dme-link-startup: error code %d\n", ret);
+ return ret;
+}
+
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset UniPro stack.
+ * This function now deals with cold reset.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+			"dme-enable: error code %d\n", ret);
+
+ return ret;
+}
+
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for 1st call to
+ * this function
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+ struct ufs_pa_layer_attr orig_pwr_info;
+ struct ufs_pa_layer_attr temp_pwr_info;
+ bool pwr_mode_change = false;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+ orig_pwr_info = hba->pwr_info;
+ temp_pwr_info = orig_pwr_info;
+
+ if (orig_pwr_info.pwr_tx == FAST_MODE ||
+ orig_pwr_info.pwr_rx == FAST_MODE) {
+ temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+ temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+ pwr_mode_change = true;
+ } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+ orig_pwr_info.pwr_rx == SLOW_MODE) {
+ temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+ temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+ pwr_mode_change = true;
+ }
+ if (pwr_mode_change) {
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ if (ret)
+ goto out;
+ }
+ }
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
+ *mib_val = uic_cmd.argument3;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+ && pwr_mode_change)
+ ufshcd_change_power_mode(hba, &orig_pwr_info);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
+ * and device UniPro link, hence their final completion is indicated by
+ * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to the normal UIC command completion status (UCCS). This function only
+ * returns after the relevant status bits indicate the completion.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ struct completion uic_async_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+ bool reenable_intr = false;
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->uic_async_done = &uic_async_done;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
+ }
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->uic_async_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
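+	/* UPMCRS reports the result of the power mode change request */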
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ if (ret) {
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+out:
+ return ret;
+}
+
+static int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
+ struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+
+ /*
+		 * If link recovery fails then return error so that the caller
+		 * doesn't retry the hibern8 enter again.
+ */
+ if (ufshcd_link_recovery(hba))
+ ret = -ENOLINK;
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
+
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
+
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret || ret == -ENOLINK)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
+ } else {
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.hibern8_exit_cnt++;
+ }
+
+ return ret;
+}
+
+static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+ hba->pwr_info.gear_rx = UFS_PWM_G1;
+ hba->pwr_info.gear_tx = UFS_PWM_G1;
+ hba->pwr_info.lane_rx = 1;
+ hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+ hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+ hba->pwr_info.hs_rate = 0;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOW_MODE;
+ }
+
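+	/* The peer's max RX gear bounds the gear the host can use for TX */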
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOW_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
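+	/* PA_PWRMODE: RX mode in the upper nibble, TX mode in the lower nibble */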
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: power mode change failed %d\n", __func__, ret);
+ } else {
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ pwr_mode);
+
+ memcpy(&hba->pwr_info, pwr_mode,
+ sizeof(struct ufs_pa_layer_attr));
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ struct ufs_pa_layer_attr final_params = { 0 };
+ int ret;
+
+ ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
+ desired_pwr_mode, &final_params);
+
+ if (ret)
+ memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+ ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_print_pwr_info(hba);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set fDeviceInit flag and poll until device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i;
+ int err;
+ bool flag_res = 1;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* Enable required interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+ /* Configure interrupt aggregation */
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+ else
+ ufshcd_disable_intr_aggr(hba);
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ /*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
+ * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+ */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(hba->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10, 1, can_sleep);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
+ * ufshcd_hba_execute_hce - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+{
+ int retry;
+
+ /*
+ * msleep of 1 and 5 used in this function might result in msleep(20),
+ * but it was necessary to send the UFS FPGA to reset mode during
+ * development and testing of this driver. msleep can be changed to
+ * mdelay and retry count can be reduced based on the controller.
+ */
+ if (!ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba, true);
+
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
+
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction might be read back.
+ * This delay can be changed based on the controller.
+ */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(hba->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+
+ return 0;
+}
+
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
+ ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
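+
+		/* DME_RESET + DME_ENABLE replaces the HCE init sequence here */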
+ ret = ufshcd_dme_reset(hba);
+ if (!ret) {
+ ret = ufshcd_dme_enable(hba);
+ if (!ret)
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+ if (ret)
+ dev_err(hba->dev,
+ "Host controller enable failed with non-hce\n");
+ }
+ } else {
+ ret = ufshcd_hba_execute_hce(hba);
+ }
+
+ return ret;
+}
+
+static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
+{
+ int tx_lanes, i, err = 0;
+
+ if (!peer)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ else
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ for (i = 0; i < tx_lanes; i++) {
+ if (!peer)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ else
+ err = ufshcd_dme_peer_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ if (err) {
+ dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
+ __func__, peer, i, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, true);
+}
+
+/**
+ * ufshcd_link_startup - Initialize unipro link startup
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_link_startup(struct ufs_hba *hba)
+{
+ int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If UFS device isn't active then we will have to issue link startup
+	 * 2 times to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+
+link_startup:
+ do {
+ ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
+
+ if (ret)
+ /* failed to get the link up... retire */
+ goto out;
+
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+ ufshcd_print_pwr_info(hba);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_device_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
+ /* Include any host controller configuration via UIC commands */
+ ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_make_hba_operational(hba);
+out:
+ if (ret) {
+ dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+ return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
+ * value that the host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+ int ret = 0;
+ u8 lun_qdepth;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ lun_qdepth = hba->nutrs;
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+
+	/* Some WLUNs don't support unit descriptors */
+ if (ret == -EOPNOTSUPP)
+ lun_qdepth = 1;
+ else if (!lun_qdepth)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_change_queue_depth(sdev, lun_qdepth);
+}
+
+/**
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success and the b_lu_write_protect status is returned
+ * in the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+ u8 lun,
+ u8 *b_lu_write_protect)
+{
+ int ret;
+
+ if (!b_lu_write_protect)
+ ret = -EINVAL;
+ /*
+ * According to UFS device spec, RPMB LU can't be write
+ * protected so skip reading bLUWriteProtect parameter for
+ * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+ */
+ else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ ret = -ENOTSUPP;
+ else
+ ret = ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_LU_WR_PROTECT,
+ b_lu_write_protect,
+ sizeof(*b_lu_write_protect));
+ return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ if (hba->dev_info.f_power_on_wp_en &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ u8 b_lu_write_protect;
+
+ if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+ &b_lu_write_protect) &&
+ (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+ hba->dev_info.is_lu_power_on_wp = true;
+ }
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+
+ /* allow SCSI layer to restart the device in case of errors */
+ sdev->allow_restart = 1;
+
+ /* REPORT SUPPORTED OPERATION CODES is not supported */
+ sdev->no_report_opcodes = 1;
+
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
+
+ ufshcd_set_queue_depth(sdev);
+
+ ufshcd_get_lu_power_on_wp_status(hba, sdev);
+
+ return 0;
+}
+
+/**
+ * ufshcd_change_queue_depth - change queue depth
+ * @sdev: pointer to SCSI device
+ * @depth: required depth to set
+ *
+ * Change queue depth and make sure the max. limits are not crossed.
+ */
+static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ if (depth > hba->nutrs)
+ depth = hba->nutrs;
+ return scsi_change_queue_depth(sdev, depth);
+}
+
+/**
+ * ufshcd_slave_configure - adjust SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static int ufshcd_slave_configure(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+
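+	/* Pad/limit transfers to the PRDT byte count granularity */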
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ /* Drop the reference as it won't be needed anymore */
+ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->sdev_ufs_device = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ * @resp: task management service response
+ *
+ * Returns non-zero value on error, zero on success
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->output_param1);
+ task_result = task_result & MASK_TM_SERVICE_RESP;
+ if (resp)
+ *resp = (u8)task_result;
+ } else {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ocs_value;
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrbp: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
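+		/* fallthrough */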
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ scsi_status;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_ABORTED:
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ /*
+ * Currently we are only supporting BKOPs exception
+ * events hence we can ignore BKOPs exception event
+ * during power management callbacks. BKOPs exception
+ * event is not expected to be raised in runtime suspend
+ * callback as it allows the urgent bkops.
+ * During system suspend, we are anyway forcefully
+ * disabling the bkops and if urgent bkops is needed
+ * it will be enabled on system resume. Long term
+ * solution could be to abort the system suspend if
+ * UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+			dev_err(hba->dev,
+				"Unexpected request response code = %x\n",
+				result);
+			result = DID_ERROR << 16;
+ break;
+ }
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_COMMAND_STATUS:
+ result |= DID_REQUEUE << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(hba->dev,
+ "OCS error from controller = %x for tag %d\n",
+ ocs, lrbp->task_tag);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ break;
+ } /* end of switch */
+
+ if (host_byte(result) != DID_OK)
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
+ return result;
+}
+
+/**
+ * ufshcd_uic_cmd_compl - handle completion of uic command
+ * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
+ */
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+{
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
+ hba->active_uic_cmd->argument2 |=
+ ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
+ complete(&hba->active_uic_cmd->done);
+ }
+
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ complete(hba->uic_async_done);
+}
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ int result;
+ int index;
+
+ for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ ufshcd_add_command_trace(hba, index, "complete");
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ __ufshcd_release(hba);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ ufshcd_add_command_trace(hba, index,
+ "dev_complete");
+ complete(hba->dev_cmd.complete);
+ }
+ }
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
+
+ lrbp->compl_time_stamp = ktime_get();
+ }
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ ufshcd_clk_scaling_update_busy(hba);
+
+ /* we might have free'd some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent other interrupts starvation the DB is read once
+ * after reset. The down side of this solution is the possibility of
+ * false interrupt if device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ if (ufshcd_is_intr_aggr_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
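+	/* Bits cleared in the doorbell but still outstanding are completed */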
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= MASK_EE_STATUS;
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+
+ /* No need of URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback of the device moving into a critical state where it is
+ * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. This function would change the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+ } else {
+ hba->auto_bkops_enabled = true;
+ hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+ ufshcd_disable_auto_bkops(hba);
+ }
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to "status" argument passed to
+ * this function, disable otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+ enum bkops_status status)
+{
+ int err;
+ u32 curr_status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ } else if (curr_status > BKOPS_STATUS_MAX) {
+ dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+ __func__, curr_status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (curr_status >= status)
+ err = ufshcd_enable_auto_bkops(hba);
+ else
+ err = ufshcd_disable_auto_bkops(hba);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0, 1 if bkops is not enabled,
+ * and a negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+ int err;
+ u32 curr_status = 0;
+
+ if (hba->is_urgent_bkops_lvl_checked)
+ goto enable_auto_bkops;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * We are seeing that some devices are raising the urgent bkops
+	 * exception events even when BKOPS status doesn't indicate performance
+	 * impacted or critical. Handle these devices by determining their urgent
+ * bkops status at runtime.
+ */
+ if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+ dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+ __func__, curr_status);
+ /* update the current status as the urgent bkops level */
+ hba->urgent_bkops_lvl = curr_status;
+ hba->is_urgent_bkops_lvl_checked = true;
+ }
+
+enable_auto_bkops:
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+
+ if (status & MASK_EE_URGENT_BKOPS)
+ ufshcd_bkops_exception_event_handler(hba);
+
+out:
+ scsi_unblock_requests(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ return;
+}
+
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks whether error handling is
+ * required to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool err_handling = true;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
+ * device fatal error and/or DL NAC & REPLAY timeout errors.
+ */
+ if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+ goto out;
+
+ if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+ goto out;
+
+ if ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+ int err;
+ /*
+ * wait for 50ms to see if we can get any other errors or not.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ msleep(50);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /*
+		 * now check if we have got any other severe errors other than
+		 * the DL NAC error.
+ */
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+ goto out;
+
+ /*
+ * As DL NAC is the only error received so far, send out NOP
+ * command to confirm if link is still active or not.
+ * - If we don't get any response then do error recovery.
+ * - If we get response then clear the DL NAC error bit.
+ */
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_verify_dev_init(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (err)
+ goto out;
+
+ /* Link seems to be alive hence ignore the DL NAC errors */
+ if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+ hba->saved_err &= ~UIC_ERROR;
+ /* clear NAC error */
+ hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ if (!hba->saved_uic_err) {
+ err_handling = false;
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err_handling;
+}
+
+/**
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
+ */
+static void ufshcd_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ unsigned long flags;
+ u32 err_xfer = 0;
+ u32 err_tm = 0;
+ int err = 0;
+ int tag;
+ bool needs_reset = false;
+
+ hba = container_of(work, struct ufs_hba, eh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ goto out;
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ bool ret;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+ ret = ufshcd_quirk_dl_nac_errors(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!ret)
+ goto skip_err_handling;
+ }
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ needs_reset = true;
+
+ /*
+ * if host reset is required then skip clearing the pending
+ * transfers forcefully because they will automatically get
+ * cleared after link startup.
+ */
+ if (needs_reset)
+ goto skip_pending_xfer_clear;
+
+ /* release lock as clear command might sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, tag)) {
+ err_xfer = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+ /* Clear pending task management requests */
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+ if (ufshcd_clear_tm_cmd(hba, tag)) {
+ err_tm = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
+
+lock_skip_pending_xfer_clear:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
+
+ if (err_xfer || err_tm)
+ needs_reset = true;
+
+skip_pending_xfer_clear:
+ /* Fatal errors need reset */
+ if (needs_reset) {
+ unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+ /*
+ * ufshcd_reset_and_restore() does the link reinitialization
+		 * which will need at least one empty doorbell slot to send the
+ * device management commands (NOP and query commands).
+ * If there is no slot empty at this moment then free up last
+ * slot forcefully.
+ */
+ if (hba->outstanding_reqs == max_doorbells)
+ __ufshcd_transfer_req_compl(hba,
+ (1UL << (hba->nutrs - 1)));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_reset_and_restore(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (err) {
+ dev_err(hba->dev, "%s: reset and restore failed\n",
+ __func__);
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ /*
+ * Inform scsi mid-layer that we did reset and allow to handle
+ * Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ }
+
+skip_err_handling:
+ if (!needs_reset) {
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->saved_err || hba->saved_uic_err)
+ dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ }
+
+ ufshcd_clear_eh_in_progress(hba);
+
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+ u32 reg)
+{
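+	/* Record the value and timestamp in a circular error history */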
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ */
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+ }
+
+ /* PA_INIT_ERROR is fatal and needs UIC reset */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg)
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+
+	/* UIC NL/TL/DME errors need software retry */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
+ hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ }
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
+ hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ }
+
+ dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+ __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+ bool queue_eh_work = false;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ queue_eh_work = true;
+
+ if (hba->errors & UIC_ERROR) {
+ hba->uic_error = 0;
+ ufshcd_update_uic_error(hba);
+ if (hba->uic_error)
+ queue_eh_work = true;
+ }
+
+ if (queue_eh_work) {
+ /*
+ * update the transfer error masks to sticky bits, let's do this
+ * irrespective of current ufshcd_state.
+ */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
+ /* handle fatal errors only when link is functional */
+ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+ /* block commands from scsi mid-layer */
+ ufshcd_scsi_block_requests(hba);
+
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+
+ /* dump controller state before resetting */
+ if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ bool pr_prdt = !!(hba->saved_err &
+ SYSTEM_BUS_FATAL_ERROR);
+
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ __func__, hba->saved_err,
+ hba->saved_uic_err);
+
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs,
+ pr_prdt);
+ }
+ schedule_work(&hba->eh_work);
+ }
+ }
+ /*
+ * if (!queue_eh_work) -
+ * Other errors are either non-fatal where host recovers
+ * itself without s/w intervention or errors that will be
+ * handled by the SCSI core layer.
+ */
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
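+ /*
+ * Bits set in outstanding_tasks but already cleared in the doorbell
+ * belong to completed tasks; the XOR below records them in
+ * tm_condition so that waiters in ufshcd_issue_tm_cmd() can proceed.
+ */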
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up(&hba->tm_wq);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_check_errors(hba);
+
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status, enabled_intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ /*
+ * There can be at most hba->nutrs requests in flight; in the worst
+ * case they complete one by one after the interrupt status is read.
+ * Keep re-reading the interrupt status in a loop until all of them
+ * have been handled before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
+
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utmrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for at most 1 sec for the doorbell register to be cleared by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000, true);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot;
+ int err;
+ int task_tag;
+
+ host = hba->host;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ */
+ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
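+ /*
+ * The TM UPIU task tag is offset by nutrs so it falls outside the
+ * transfer request tag range (0..nutrs - 1).
+ */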
+ task_tag = hba->nutrs + free_slot;
+ task_req_upiup->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lun_id, task_tag);
+ task_req_upiup->header.dword_1 =
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+ task_req_upiup->input_param2 = cpu_to_be32(task_id);
+
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
+ ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
+ /* wait until the task management command is completed */
+ err = wait_event_timeout(hba->tm_wq,
+ test_bit(free_slot, &hba->tm_condition),
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
+ if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, free_slot))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, free_slot);
+ err = -ETIMEDOUT;
+ } else {
+ err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ }
+
+ clear_bit(free_slot, &hba->tm_condition);
+ ufshcd_put_tm_slot(hba, free_slot);
+ wake_up(&hba->tm_tag_wq);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ lrbp = &hba->lrb[tag];
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
+ goto out;
+ }
+
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[pos].lun == lrbp->lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+ }
+ }
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+out:
+ hba->req_abort_count = 0;
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+ return err;
+}
+
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+ lrbp->req_abort_skip = true;
+ }
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Abort the pending command in the device by sending the UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race where the controller is still sending the
+ * command to the device while the abort is issued. To avoid that, first issue
+ * UFS_QUERY_TASK to check whether the command is really issued, and only then
+ * try to abort it.
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ u32 reg;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ /*
+ * Task abort to the device W-LUN is illegal. When this command
+ * fails, due to the spec violation, the next step of SCSI error
+ * handling will be to send an LU reset which, again, is a spec
+ * violation. To avoid these unnecessary/illegal steps we skip to
+ * the last error handling stage: reset and restore.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ return ufshcd_eh_host_reset_handler(cmd);
+
+ ufshcd_hold(hba, false);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* If command is already aborted/completed, return SUCCESS */
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
+ goto out;
+ }
+
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ }
+
+ /* Print Transfer Request of aborted task */
+ dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+
+ /*
+ * Print detailed info about aborted request.
+ * As more than one request might get aborted at the same time,
+ * print full information only for the first aborted request in order
+ * to reduce repeated printouts. For other aborted requests only print
+ * basic details.
+ */
+ scsi_print_command(hba->lrb[tag].cmd);
+ if (!hba->req_abort_count) {
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_trs(hba, 1 << tag, true);
+ } else {
+ ufshcd_print_trs(hba, 1 << tag, false);
+ }
+ hba->req_abort_count++;
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ err = -EIO;
+ goto out;
+ }
+
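+ /*
+ * Poll with UFS_QUERY_TASK (up to 100 iterations) to confirm whether
+ * the command is still pending in the device before sending the
+ * actual UFS_ABORT_TASK.
+ */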
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+ goto out;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err) {
+ err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
+ goto out;
+ }
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_outstanding_req_clear(hba, tag);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
+
+out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ err = FAILED;
+ }
+
+ /*
+ * This ufshcd_release() corresponds to the original scsi cmd that got
+ * aborted here (as we won't get any IRQ for it).
+ */
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to the
+ * local and remote (device) UniPro stacks, and the attributes
+ * are reset to their default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+ int err;
+ unsigned long flags;
+
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_hba_stop(hba, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* scale up clocks to max frequency before full reinitialization */
+ ufshcd_scale_clks(hba, true);
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ goto out;
+
+ /* Establish the link again and restore the device */
+ err = ufshcd_probe_hba(hba);
+
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+ err = -EIO;
+out:
+ if (err)
+ dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+ return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+ int err = 0;
+ unsigned long flags;
+ int retries = MAX_HOST_RESET_RETRIES;
+
+ do {
+ err = ufshcd_host_reset_and_restore(hba);
+ } while (err && --retries);
+
+ /*
+ * After reset the door-bell might be cleared; complete
+ * outstanding requests in s/w here.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int err;
+ unsigned long flags;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ ufshcd_hold(hba, false);
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, don't assume
+ * anything out of it. We are just avoiding race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET ||
+ hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!err) {
+ err = SUCCESS;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ } else {
+ err = FAILED;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row in the descriptor table to start the scan from
+ * @buff: power descriptor buffer
+ *
+ * Returns calculated max ICC level for specific regulator
+ */
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+ int i;
+ int curr_uA;
+ u16 data;
+ u16 unit;
+
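+ /*
+ * Scan from the highest ICC level downwards and pick the first level
+ * whose active current fits within the regulator's limit.
+ */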
+ for (i = start_scan; i >= 0; i--) {
+ data = be16_to_cpup((__be16 *)&buff[2 * i]);
+ unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+ ATTR_ICC_LVL_UNIT_OFFSET;
+ curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
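+ /* Normalize the descriptor value to microamps before comparing */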
+ switch (unit) {
+ case UFSHCD_NANO_AMP:
+ curr_uA = curr_uA / 1000;
+ break;
+ case UFSHCD_MILI_AMP:
+ curr_uA = curr_uA * 1000;
+ break;
+ case UFSHCD_AMP:
+ curr_uA = curr_uA * 1000 * 1000;
+ break;
+ case UFSHCD_MICRO_AMP:
+ default:
+ break;
+ }
+ if (sup_curr_uA >= curr_uA)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+ }
+
+ return (u32)i;
+}
+
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
+ * ICC level
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from.
+ * @len: length of desc_buf
+ *
+ * Returns the calculated ICC level; 0 in case the regulators are not
+ * initialized.
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ u8 *desc_buf, int len)
+{
+ u32 icc_level = 0;
+
+ if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+ !hba->vreg_info.vccq2) {
+ dev_err(hba->dev,
+ "%s: Regulator capability was not set, actvIccLevel=%d",
+ __func__, icc_level);
+ goto out;
+ }
+
+ if (hba->vreg_info.vcc)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+ if (hba->vreg_info.vccq)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+ if (hba->vreg_info.vccq2)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+ return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+{
+ int ret;
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 *desc_buf;
+
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return;
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: Failed reading power descriptor.len = %d ret = %d",
+ __func__, buff_len, ret);
+ goto out;
+ }
+
+ hba->init_prefetch_data.icc_level =
+ ufshcd_find_max_sup_active_icc_level(hba,
+ desc_buf, buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+ __func__, hba->init_prefetch_data.icc_level);
+
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+ __func__, hba->init_prefetch_data.icc_level, ret);
+
+out:
+ kfree(desc_buf);
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ * "REPORT_LUNS" (address: 01h)
+ * "UFS Device" (address: 50h)
+ * "RPMB" (address: 44h)
+ * "BOOT" (address: 30h)
+ * UFS device's power management needs to be controlled by "POWER CONDITION"
+ * field of SSU (START STOP UNIT) command. But this "power condition" field
+ * will take effect only when it's sent to the "UFS device" well known logical unit,
+ * hence we require the scsi_device instance to represent this logical unit in
+ * order for the UFS host driver to send the SSU command for power management.
+ *
+ * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
+ * Block) LU so user space process can control this LU. User space may also
+ * want to have access to BOOT LU.
+ *
+ * This function adds scsi device instances for all of the well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct scsi_device *sdev_rpmb;
+ struct scsi_device *sdev_boot;
+
+ hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+ if (IS_ERR(hba->sdev_ufs_device)) {
+ ret = PTR_ERR(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
+ goto out;
+ }
+ scsi_device_put(hba->sdev_ufs_device);
+
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
+ goto remove_sdev_ufs_device;
+ }
+ scsi_device_put(sdev_rpmb);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot))
+ dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
+ else
+ scsi_device_put(sdev_boot);
+ goto out;
+
+remove_sdev_ufs_device:
+ scsi_remove_device(hba->sdev_ufs_device);
+out:
+ return ret;
+}
+
+static int ufs_get_device_desc(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ int err;
+ size_t buff_len;
+ u8 model_index;
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
+ QUERY_DESC_MAX_SIZE, true/*ASCII*/);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ struct ufs_dev_fix *f;
+
+ for (f = ufs_fixups; f->quirk; f++) {
+ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
+ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
+ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+
+/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_min_activatetime);
+ if (ret)
+ goto out;
+
+ /* make sure proper unit conversion is applied */
+ tuned_pa_tactivate =
+ ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+ / PA_TACTIVATE_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ tuned_pa_tactivate);
+
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+ u32 max_hibern8_time, tuned_pa_hibern8time;
+
+ ret = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &local_tx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ max_hibern8_time = max(local_tx_hibern8_time_cap,
+ peer_rx_hibern8_time_cap);
+ /* make sure proper unit conversion is applied */
+ tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+ / PA_HIBERN8_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ tuned_pa_hibern8time);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be enabled
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
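+ /*
+ * Convert both local and peer PA_TACTIVATE values to microseconds
+ * using their advertised PA_GRANULARITY before comparing them.
+ */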
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+ if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ ufshcd_tune_pa_tactivate(hba);
+ ufshcd_tune_pa_hibern8time(hba);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+ /* set 1ms timeout for PA_TACTIVATE */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+ int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+ hba->ufs_stats.hibern8_exit_cnt = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+ memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+ hba->req_abort_count = 0;
+}
+
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+ &hba->desc_size.hlth_desc);
+ if (err)
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
+{
+ struct ufs_dev_desc card = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ret = ufshcd_link_startup(hba);
+ if (ret)
+ goto out;
+
+ /* set the default level for urgent bkops */
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+ hba->is_urgent_bkops_lvl_checked = false;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba, &card);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
+ (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+ if (ret)
+ goto out;
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+ hba->wlun_dev_clr_ua = true;
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+ */
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ bool flag;
+
+ /* clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ if (!hba->is_init_prefetch)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ if (ufshcd_scsi_add_wlus(hba))
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ }
+
+ if (!hba->is_init_prefetch)
+ hba->is_init_prefetch = true;
+
+out:
+ /*
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
+ */
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_hba_exit(hba);
+ }
+
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)data;
+
+ ufshcd_probe_hba(hba);
+}
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int index;
+ bool found = false;
+
+ if (!scmd || !scmd->device || !scmd->device->host)
+ return BLK_EH_DONE;
+
+ host = scmd->device->host;
+ hba = shost_priv(host);
+ if (!hba)
+ return BLK_EH_DONE;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[index].cmd == scmd) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /*
+ * Bypass SCSI error handling and reset the block layer timer if this
+ * SCSI command was not actually dispatched to UFS driver, otherwise
+ * let SCSI layer handle the error as usual.
+ */
+ return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+}
+
+static const struct attribute_group *ufshcd_driver_groups[] = {
+ &ufs_sysfs_unit_descriptor_group,
+ &ufs_sysfs_lun_attributes_group,
+ NULL,
+};
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_configure = ufshcd_slave_configure,
+ .slave_destroy = ufshcd_slave_destroy,
+ .change_queue_depth = ufshcd_change_queue_depth,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+ .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .eh_timed_out = ufshcd_eh_timed_out,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+ .max_host_blocked = 1,
+ .track_queue_depth = 1,
+ .sdev_groups = ufshcd_driver_groups,
+ .no_write_same = 1,
+};
+
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ int ua)
+{
+ int ret;
+
+ if (!vreg)
+ return 0;
+
+ ret = regulator_set_load(vreg->reg, ua);
+ if (ret < 0) {
+ dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
+ __func__, vreg->name, ua, ret);
+ }
+
+ return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg,
+ UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg;
+ const char *name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
+ reg = vreg->reg;
+ name = vreg->name;
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = ufshcd_config_vreg(dev, vreg, true);
+ if (!ret)
+ ret = regulator_enable(vreg->reg);
+
+ if (!ret)
+ vreg->enabled = true;
+ else
+ dev_err(dev, "%s: %s enable failed, err=%d\n",
+ __func__, vreg->name, ret);
+out:
+ return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+ else if (!vreg->enabled || vreg->unused)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufshcd_config_vreg(dev, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+ if (ret)
+ goto out;
+
+out:
+ if (ret) {
+ ufshcd_toggle_vreg(dev, info->vccq2, false);
+ ufshcd_toggle_vreg(dev, info->vccq, false);
+ ufshcd_toggle_vreg(dev, info->vcc, false);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+ return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vcc);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+ return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+ return 0;
+}
+
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+ int ret = 0;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+ else if (!info->vccq)
+ goto out;
+
+ if (unused) {
+ /* shut off the rail here */
+ ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+ /*
+ * Mark this rail as no longer used, so it doesn't get enabled
+ * later by mistake
+ */
+ if (!ret)
+ info->vccq->unused = true;
+ } else {
+ /*
+ * rail should have been already enabled hence just make sure
+ * that unused flag is cleared.
+ */
+ info->vccq->unused = false;
+ }
+out:
+ return ret;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long flags;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (list_empty(head))
+ goto out;
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
+
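+ /*
+ * Walk the clock list and enable/disable each clock; when
+ * skip_ref_clk is set, leave "ref_clk" untouched.
+ */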
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ continue;
+
+ clk_state_changed = on ^ clki->enabled;
+ if (on && !clki->enabled) {
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+ } else if (!on && clki->enabled) {
+ clk_disable_unprepare(clki->clk);
+ }
+ clki->enabled = on;
+ dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+ clki->name, on ? "en" : "dis");
+ }
+ }
+
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
+
+out:
+ if (ret) {
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ clk_disable_unprepare(clki->clk);
+ }
+ } else if (!ret && on) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ (on ? "on" : "off"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ return __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct device *dev = hba->dev;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ }
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->vops)
+ goto out;
+
+ err = ufshcd_vops_init(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_vops_setup_regulators(hba, true);
+ if (err)
+ goto out_exit;
+
+ goto out;
+
+out_exit:
+ ufshcd_vops_exit(hba);
+out:
+ if (err)
+ dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+ __func__, ufshcd_get_var_name(hba), err);
+ return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+ if (!hba->vops)
+ return;
+
+ ufshcd_vops_setup_regulators(hba, false);
+
+ ufshcd_vops_exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+ int err;
+
+ /*
+ * Handle host controller power separately from the UFS device power
+ * rails as it makes it easier to control UFS host controller power
+ * collapse, which is different from UFS device power collapse.
+ * Also, enable the host controller power before going ahead with the
+ * rest of the initialization here.
+ */
+ err = ufshcd_init_hba_vreg(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_setup_hba_vreg(hba, true);
+ if (err)
+ goto out;
+
+ err = ufshcd_init_clocks(hba);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_setup_clocks(hba, true);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_init_vreg(hba);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_setup_vreg(hba, true);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_variant_hba_init(hba);
+ if (err)
+ goto out_disable_vreg;
+
+ hba->is_powered = true;
+ goto out;
+
+out_disable_vreg:
+ ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+ ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+ ufshcd_setup_hba_vreg(hba, false);
+out:
+ return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+ if (hba->is_powered) {
+ ufshcd_variant_hba_exit(hba);
+ ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ }
+ ufshcd_setup_clocks(hba, false);
+ ufshcd_setup_hba_vreg(hba, false);
+ hba->is_powered = false;
+ }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+ unsigned char cmd[6] = {REQUEST_SENSE,
+ 0,
+ 0,
+ 0,
+ UFSHCD_REQ_SENSE_SIZE,
+ 0};
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+ msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
+ if (ret)
+ pr_err("%s: failed with err %d\n", __func__, ret);
+
+ kfree(buffer);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ * power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode)
+{
+ unsigned char cmd[6] = { START_STOP };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ sdp = hba->sdev_ufs_device;
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If scsi commands fail, the scsi mid-layer schedules scsi error-
+ * handling, which would wait for host to be resumed. Since we know
+ * we are functional while we are here, skip host resume in error
+ * handling context.
+ */
+ hba->host->eh_noresume = 1;
+ if (hba->wlun_dev_clr_ua) {
+ ret = ufshcd_send_request_sense(hba, sdp);
+ if (ret)
+ goto out;
+ /* Unit attention condition is cleared now */
+ hba->wlun_dev_clr_ua = false;
+ }
+
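+ /*
+ * The POWER CONDITION field occupies the upper nibble of byte 4 of
+ * the START STOP UNIT CDB, hence the shift by 4.
+ */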
+ cmd[4] = pwr_mode << 4;
+
+ /*
+ * This function is generally called from the power management
+ * callbacks, hence set the RQF_PM flag so that it doesn't resume the
+ * already suspended children.
+ */
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d, result %x\n",
+ pwr_mode, ret);
+ if (driver_byte(ret) & DRIVER_SENSE)
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+out:
+ scsi_device_put(sdp);
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ ufshcd_set_link_hibern8(hba);
+ else
+ goto out;
+ }
+ /*
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ /*
+ * Let's make sure that the link is in low power mode; we currently
+ * do this by putting the link in Hibern8. Another way to put the
+ * link in low power mode is to send a DME end point reset to the
+ * device and then send the DME reset command to the local UniPro.
+ * But putting the link in Hibern8 is much faster.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ goto out;
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba, true);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ /*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from the VCCQ rail).
+ * To avoid this situation, add 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba) &&
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+ usleep_range(2000, 2100);
+
+ /*
+ * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+ * save some power.
+ *
+ * If UFS device and link is in OFF state, all power supplies (VCC,
+ * VCCQ, VCCQ2) can be turned off if power on write protect is not
+ * required. If UFS link is inactive (Hibern8 or OFF state) and device
+ * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
+ * in low power state which would save some power.
+ */
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ufshcd_setup_vreg(hba, false);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (!ufshcd_is_link_active(hba)) {
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ }
+ }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ret = ufshcd_setup_vreg(hba, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ if (!ret && !ufshcd_is_link_active(hba)) {
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vccq_lpm;
+ }
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ }
+ goto out;
+
+vccq_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+ return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, true);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both the UFS device and the UFS link are powered off.
+ *
+ * NOTE: UFS device & link must be active before we enter this function.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret = 0;
+ enum ufs_pm_level pm_lvl;
+ enum ufs_dev_pwr_mode req_dev_pwr_mode;
+ enum uic_link_state req_link_state;
+
+ hba->pm_op_in_progress = 1;
+ if (!ufshcd_is_shutdown_pm(pm_op)) {
+ pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->rpm_lvl : hba->spm_lvl;
+ req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+ req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+ } else {
+ req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+ req_link_state = UIC_LINK_OFF_STATE;
+ }
+
+ /*
+ * If we can't transition into any of the low power modes
+ * just gate the clocks.
+ */
+ ufshcd_hold(hba, false);
+ hba->clk_gating.is_suspended = true;
+
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
+
+ if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ req_link_state == UIC_LINK_ACTIVE_STATE) {
+ goto disable_clks;
+ }
+
+ if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+ (req_link_state == hba->uic_link_state))
+ goto enable_gating;
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+ ret = -EINVAL;
+ goto enable_gating;
+ }
+
+ if (ufshcd_is_runtime_pm(pm_op)) {
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations if bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
+ }
+
+ if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+ ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op))) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
+
+ ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ if (ret)
+ goto set_dev_active;
+
+ ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+ /*
+ * Call the vendor specific suspend callback. As these callbacks may
+ * access vendor specific host controller register space, call them
+ * while the host clocks are still ON, i.e. before they are turned off
+ * below.
+ */
+ ret = ufshcd_vops_suspend(hba, pm_op);
+ if (ret)
+ goto set_link_active;
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ /*
+ * Disable the host irq as there won't be any
+ * host controller transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ goto out;
+
+set_link_active:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ ufshcd_vreg_set_hpm(hba);
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ ufshcd_set_link_active(hba);
+ else if (ufshcd_is_link_off(hba))
+ ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+ if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ ufshcd_disable_auto_bkops(hba);
+enable_gating:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret;
+ enum uic_link_state old_link_state;
+
+ hba->pm_op_in_progress = 1;
+ old_link_state = hba->uic_link_state;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto out;
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ /*
+ * Call vendor specific resume callback. As these callbacks may access
+ * vendor specific host controller register space call them when the
+ * host clocks are ON.
+ */
+ ret = ufshcd_vops_resume(hba, pm_op);
+ if (ret)
+ goto disable_vreg;
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (!ret)
+ ufshcd_set_link_active(hba);
+ else
+ goto vendor_suspend;
+ } else if (ufshcd_is_link_off(hba)) {
+ ret = ufshcd_host_reset_and_restore(hba);
+ /*
+ * ufshcd_host_reset_and_restore() should have already
+ * set the link state as active
+ */
+ if (ret || !ufshcd_is_link_active(hba))
+ goto vendor_suspend;
+ }
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ if (ret)
+ goto set_old_link_state;
+ }
+
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+ ufshcd_enable_auto_bkops(hba);
+ else
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+
+ hba->clk_gating.is_suspended = false;
+
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
+ goto out;
+
+set_old_link_state:
+ ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+ ufshcd_vops_suspend(hba, pm_op);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+ ufshcd_disable_irq(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_setup_clocks(hba, false);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba || !hba->is_powered)
+ return 0;
+
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ /*
+		 * UFS device and/or UFS link low power states during runtime
+		 * suspend seem to be different from what is expected during
+		 * system suspend. Hence runtime resume the device & link and
+		 * let the system suspend low power states take effect.
+		 * TODO: If resume takes a long time, we might optimize it in
+		 * the future by not resuming everything if possible.
+ */
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = true;
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
+ /*
+ * Let the runtime resume take care of resuming
+ * if runtime suspended.
+ */
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_resume);
+
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. Following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ * to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So the following would be the possible power state after this function
+ * returns successfully:
+ * S1: UFS device in Active state with VCC rail ON
+ * UniPro link in Active state
+ * All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
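+
+/*
+ * Minimal wiring sketch (hypothetical glue driver; the ufs_glue_* names are
+ * illustrative only): the exported suspend/resume helpers above are meant to
+ * be called from the bus glue driver's dev_pm_ops callbacks, roughly:
+ *
+ *	static int ufs_glue_runtime_suspend(struct device *dev)
+ *	{
+ *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+ *	}
+ *
+ *	static const struct dev_pm_ops ufs_glue_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(ufs_glue_suspend, ufs_glue_resume)
+ *		SET_RUNTIME_PM_OPS(ufs_glue_runtime_suspend,
+ *				   ufs_glue_runtime_resume, NULL)
+ *	};
+ */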
+
+/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/**
+ * ufshcd_remove - de-allocate SCSI host and host memory space
+ * data structures
+ * @hba: per adapter instance
+ */
+void ufshcd_remove(struct ufs_hba *hba)
+{
+ ufs_sysfs_remove_nodes(hba->dev);
+ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba, true);
+
+ ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_hba_exit(hba);
+}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
+
+/**
+ * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+ * @hba: pointer to Host Bus Adapter (HBA)
+ */
+void ufshcd_dealloc_host(struct ufs_hba *hba)
+{
+ scsi_host_put(hba->host);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ return 0;
+ }
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
+/**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err = 0;
+
+ if (!dev) {
+ dev_err(dev,
+			"Invalid memory reference: dev is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_error;
+ }
+ hba = shost_priv(host);
+ hba->host = host;
+ hba->dev = dev;
+ *hba_handle = hba;
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
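+
+/*
+ * Minimal usage sketch (assuming a bus glue driver such as ufshcd-pltfrm;
+ * the local names are illustrative): allocation is paired with ufshcd_init()
+ * and, on failure, ufshcd_dealloc_host():
+ *
+ *	struct ufs_hba *hba;
+ *	int err;
+ *
+ *	err = ufshcd_alloc_host(dev, &hba);
+ *	if (err)
+ *		return err;
+ *	err = ufshcd_init(hba, mmio_base, irq);
+ *	if (err)
+ *		ufshcd_dealloc_host(hba);
+ */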
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+ int err;
+ struct Scsi_Host *host = hba->host;
+ struct device *dev = hba->dev;
+
+ if (!mmio_base) {
+ dev_err(hba->dev,
+			"Invalid memory reference: mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
+ /* Get Interrupt bit mask per version */
+ hba->intr_mask = ufshcd_get_intr_mask(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(hba->dev, "set dma mask failed\n");
+ goto out_disable;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFS_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+ hba->max_pwr_info.is_valid = false;
+
+	/* Initialize wait queue for task management */
+ init_waitqueue_head(&hba->tm_wq);
+ init_waitqueue_head(&hba->tm_tag_wq);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+
+ /* Initialize UIC command mutex */
+ mutex_init(&hba->uic_cmd_mutex);
+
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ init_rwsem(&hba->clk_scaling_lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
+ ufshcd_init_clk_gating(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+ /*
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+ mb();
+
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(hba->dev, "request irq failed\n");
+ goto exit_gating;
+ } else {
+ hba->is_irq_enabled = true;
+ }
+
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto exit_gating;
+ }
+
+ /* Host controller enable */
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ goto out_remove_scsi_host;
+ }
+
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+ }
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+	/* Set the default auto-hibernate idle timer value to 150 ms */
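+	/*
+	 * The scale field value of 3 used below selects 1 ms units for the
+	 * Auto-Hibernate idle timer (UFSHCI AHIT encoding), so a timer value
+	 * of 150 corresponds to the intended 150 ms.
+	 */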
+ if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+ }
+
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
+ /*
+	 * We are assuming that the device wasn't put into sleep/power-down
+	 * state during the boot stage, before the kernel took over.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
+ */
+ ufshcd_set_ufs_dev_active(hba);
+
+ async_schedule(ufshcd_async_scan, hba);
+ ufs_sysfs_add_nodes(hba->dev);
+
+ return 0;
+
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
+exit_gating:
+ ufshcd_exit_clk_gating(hba);
+out_disable:
+ hba->is_irq_enabled = false;
+ ufshcd_hba_exit(hba);
+out_error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_init);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/rr-cache/e4f5bcdfe9ae1a8e2987cd336d16246930272948/preimage b/rr-cache/e4f5bcdfe9ae1a8e2987cd336d16246930272948/preimage
new file mode 100644
index 0000000..ee66fed
--- /dev/null
+++ b/rr-cache/e4f5bcdfe9ae1a8e2987cd336d16246930272948/preimage
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+static int tsens_get_temp(void *data, int *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ return tmdev->ops->get_temp(tmdev, s->id, temp);
+}
+
+static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+{
+ const struct tsens_sensor *s = p;
+ struct tsens_device *tmdev = s->tmdev;
+
+ if (tmdev->ops->get_trend)
+ return tmdev->ops->get_trend(tmdev, s->id, trend);
+
+ return -ENOTSUPP;
+}
+
+static int __maybe_unused tsens_suspend(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->suspend)
+ return tmdev->ops->suspend(tmdev);
+
+ return 0;
+}
+
+static int __maybe_unused tsens_resume(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->resume)
+ return tmdev->ops->resume(tmdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
+
+static const struct of_device_id tsens_table[] = {
+ {
+ .compatible = "qcom,msm8916-tsens",
+ .data = &data_8916,
+ }, {
+ .compatible = "qcom,msm8974-tsens",
+ .data = &data_8974,
+ }, {
+ .compatible = "qcom,msm8996-tsens",
+ .data = &data_8996,
+ },
+ {
+ .compatible = "qcom,msm8998-tsens",
+ .data = &data_8998,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static const struct thermal_zone_of_device_ops tsens_of_ops = {
+ .get_temp = tsens_get_temp,
+ .get_trend = tsens_get_trend,
+};
+
+static int tsens_register(struct tsens_device *tmdev)
+{
+ int i;
+ struct thermal_zone_device *tzd;
+ u32 *hw_id, n = tmdev->num_sensors;
+
+ hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
+ if (!hw_id)
+ return -ENOMEM;
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
+ &tmdev->sensor[i],
+ &tsens_of_ops);
+ if (IS_ERR(tzd))
+ continue;
+ tmdev->sensor[i].tzd = tzd;
+ if (tmdev->ops->enable)
+ tmdev->ops->enable(tmdev, i);
+ }
+ return 0;
+}
+
+static int tsens_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct device *dev;
+ struct device_node *np;
+ struct tsens_device *tmdev;
+ const struct tsens_data *data;
+ const struct of_device_id *id;
+ u32 num_sensors;
+
+ if (pdev->dev.of_node)
+ dev = &pdev->dev;
+ else
+ dev = pdev->dev.parent;
+
+ np = dev->of_node;
+
+ id = of_match_node(tsens_table, np);
+ if (id)
+ data = id->data;
+ else
+ data = &data_8960;
+
+ num_sensors = data->num_sensors;
+
+ if (np)
+ of_property_read_u32(np, "#qcom,sensors", &num_sensors);
+
+ if (num_sensors <= 0) {
+ dev_err(dev, "invalid number of sensors\n");
+ return -EINVAL;
+ }
+
+<<<<<<<
+ tmdev = devm_kzalloc(dev,
+ struct_size(tmdev, sensor, num_sensors),
+ GFP_KERNEL);
+=======
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev) +
+ num_sensors * sizeof(*s), GFP_KERNEL);
+>>>>>>>
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->num_sensors = num_sensors;
+ tmdev->ops = data->ops;
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (data->hw_ids)
+ tmdev->sensor[i].hw_id = data->hw_ids[i];
+ else
+ tmdev->sensor[i].hw_id = i;
+ }
+
+ if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+ return -EINVAL;
+
+ ret = tmdev->ops->init(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens init failed\n");
+ return ret;
+ }
+
+ if (tmdev->ops->calibrate) {
+ ret = tmdev->ops->calibrate(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens calibration failed\n");
+ return ret;
+ }
+ }
+
+ ret = tsens_register(tmdev);
+
+ platform_set_drvdata(pdev, tmdev);
+
+ return ret;
+}
+
+static int tsens_remove(struct platform_device *pdev)
+{
+ struct tsens_device *tmdev = platform_get_drvdata(pdev);
+
+ if (tmdev->ops->disable)
+ tmdev->ops->disable(tmdev);
+
+ return 0;
+}
+
+static struct platform_driver tsens_driver = {
+ .probe = tsens_probe,
+ .remove = tsens_remove,
+ .driver = {
+ .name = "qcom-tsens",
+ .pm = &tsens_pm_ops,
+ .of_match_table = tsens_table,
+ },
+};
+module_platform_driver(tsens_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM Temperature Sensor driver");
+MODULE_ALIAS("platform:qcom-tsens");
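+
+/*
+ * Rough sketch of the per-SoC data this driver dispatches through (names and
+ * values are illustrative; the real tables live in the per-SoC tsens files
+ * referenced by tsens_table above):
+ *
+ *	static const struct tsens_ops ops_8916 = {
+ *		.init		= init_common,
+ *		.calibrate	= calibrate_8916,
+ *		.get_temp	= get_temp_common,
+ *	};
+ *
+ *	const struct tsens_data data_8916 = {
+ *		.num_sensors	= 5,
+ *		.ops		= &ops_8916,
+ *	};
+ */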
diff --git a/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/postimage b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/postimage
new file mode 100644
index 0000000..f4943e1
--- /dev/null
+++ b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/postimage
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "cpufreq-dt.h"
+
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id whitelist[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "apm,xgene-shadowcat", },
+
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
+ { .compatible = "hisilicon,hi3660", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+ { .compatible = "fsl,imx7d", },
+
+ { .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7745", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7792", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399", },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
+
+ { }
+};
+
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+ { .compatible = "marvell,armadaxp", },
+
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
+ { .compatible = "nvidia,tegra124", },
+
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,apq8016", },
+ { .compatible = "qcom,msm8996", },
+
+ { .compatible = "st,stih407", },
+ { .compatible = "st,stih410", },
+
+ { .compatible = "sigma,tango4", },
+
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+
+ { }
+};
+
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ const void *data = NULL;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(whitelist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ goto create_pdev;
+
+ of_node_put(np);
+ return -ENODEV;
+
+create_pdev:
+ of_node_put(np);
+ return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
+ -1, data,
+ sizeof(struct cpufreq_dt_platform_data)));
+}
+device_initcall(cpufreq_dt_platdev_init);
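+
+/*
+ * Illustrative note: a platform still using the old "operating-points" (v1)
+ * bindings opts in by adding its root compatible to the whitelist above,
+ * e.g. { .compatible = "vendor,board-soc", } (placeholder name). Platforms
+ * whose CPU0 node carries "operating-points-v2" get the device created
+ * automatically unless their root compatible appears in the blacklist.
+ */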
diff --git a/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/preimage b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/preimage
new file mode 100644
index 0000000..3d98564
--- /dev/null
+++ b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/preimage
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "cpufreq-dt.h"
+
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id whitelist[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "apm,xgene-shadowcat", },
+
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
+ { .compatible = "hisilicon,hi3660", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+ { .compatible = "fsl,imx7d", },
+
+ { .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7745", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7792", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399", },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
+
+ { }
+};
+
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+ { .compatible = "marvell,armadaxp", },
+
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
+ { .compatible = "nvidia,tegra124", },
+
+<<<<<<<
+ { .compatible = "qcom,apq8016", },
+=======
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+>>>>>>>
+
+ { .compatible = "st,stih407", },
+ { .compatible = "st,stih410", },
+
+ { .compatible = "sigma,tango4", },
+
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+
+ { }
+};
+
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ const void *data = NULL;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(whitelist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ goto create_pdev;
+
+ of_node_put(np);
+ return -ENODEV;
+
+create_pdev:
+ of_node_put(np);
+ return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
+ -1, data,
+ sizeof(struct cpufreq_dt_platform_data)));
+}
+device_initcall(cpufreq_dt_platdev_init);
diff --git a/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/thisimage b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/thisimage
new file mode 100644
index 0000000..3d98564
--- /dev/null
+++ b/rr-cache/e53cae16483794180d7a06fb5b7dee0d4975d4a9/thisimage
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "cpufreq-dt.h"
+
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id whitelist[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "apm,xgene-shadowcat", },
+
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
+ { .compatible = "hisilicon,hi3660", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+ { .compatible = "fsl,imx7d", },
+
+ { .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7745", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7792", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399", },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
+
+ { }
+};
+
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+ { .compatible = "marvell,armadaxp", },
+
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
+ { .compatible = "nvidia,tegra124", },
+
+<<<<<<<
+ { .compatible = "qcom,apq8016", },
+=======
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+>>>>>>>
+
+ { .compatible = "st,stih407", },
+ { .compatible = "st,stih410", },
+
+ { .compatible = "sigma,tango4", },
+
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+
+ { }
+};
+
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ const void *data = NULL;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(whitelist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ goto create_pdev;
+
+ of_node_put(np);
+ return -ENODEV;
+
+create_pdev:
+ of_node_put(np);
+ return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
+ -1, data,
+ sizeof(struct cpufreq_dt_platform_data)));
+}
+device_initcall(cpufreq_dt_platdev_init);
diff --git a/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/postimage b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/postimage
new file mode 100644
index 0000000..bd1f2c7
--- /dev/null
+++ b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/postimage
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+
+struct regmap;
+struct reset_controller_dev;
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
+ * @cxcs: offsets of branch registers to toggle mem/periph bits in
+ * @cxc_count: number of @cxcs
+ * @pwrsts: Possible powerdomain power states
+ * @resets: ids of resets associated with this gdsc
+ * @reset_count: number of @resets
+ * @rcdev: reset controller
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
+ unsigned int clamp_io_ctrl;
+ unsigned int *cxcs;
+ unsigned int cxc_count;
+ const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
+#define CLAMP_IO BIT(1)
+#define HW_CTRL BIT(2)
+#define SW_RESET BIT(3)
+#define AON_RESET BIT(4)
+#define POLL_CFG_GDSCR BIT(5)
+#define ALWAYS_ON BIT(6)
+ struct reset_controller_dev *rcdev;
+ unsigned int *resets;
+ unsigned int reset_count;
+};
+
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
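+
+/*
+ * Minimal usage sketch (offsets and names are illustrative, assuming a
+ * Qualcomm clock controller driver): a GDSC is described statically and
+ * handed to gdsc_register() below through a gdsc_desc:
+ *
+ *	static struct gdsc venus_gdsc = {
+ *		.gdscr = 0x1024,
+ *		.pd = {
+ *			.name = "venus",
+ *		},
+ *		.pwrsts = PWRSTS_OFF_ON,
+ *	};
+ */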
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
+#else
+static inline int gdsc_register(struct gdsc_desc *desc,
+ struct reset_controller_dev *rcdev,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
diff --git a/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/preimage b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/preimage
new file mode 100644
index 0000000..df50bb9
--- /dev/null
+++ b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/preimage
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+
+struct regmap;
+struct reset_controller_dev;
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
+ * @cxcs: offsets of branch registers to toggle mem/periph bits in
+ * @cxc_count: number of @cxcs
+ * @pwrsts: Possible powerdomain power states
+ * @resets: ids of resets associated with this gdsc
+ * @reset_count: number of @resets
+ * @rcdev: reset controller
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
+ unsigned int clamp_io_ctrl;
+ unsigned int *cxcs;
+ unsigned int cxc_count;
+ const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
+#define CLAMP_IO BIT(1)
+#define HW_CTRL BIT(2)
+<<<<<<<
+#define ALWAYS_ON BIT(3)
+=======
+#define SW_RESET BIT(3)
+#define AON_RESET BIT(4)
+#define POLL_CFG_GDSCR BIT(5)
+#define ALWAYS_ON BIT(6)
+>>>>>>>
+ struct reset_controller_dev *rcdev;
+ unsigned int *resets;
+ unsigned int reset_count;
+};
+
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
+#else
+static inline int gdsc_register(struct gdsc_desc *desc,
+ struct reset_controller_dev *rcdev,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
diff --git a/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/thisimage b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/thisimage
new file mode 100644
index 0000000..df50bb9
--- /dev/null
+++ b/rr-cache/e5e8949aba32b84a8e970724fed87908fad4111c/thisimage
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+
+struct regmap;
+struct reset_controller_dev;
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
+ * @cxcs: offsets of branch registers to toggle mem/periph bits in
+ * @cxc_count: number of @cxcs
+ * @pwrsts: Possible powerdomain power states
+ * @resets: ids of resets associated with this gdsc
+ * @reset_count: number of @resets
+ * @rcdev: reset controller
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
+ unsigned int clamp_io_ctrl;
+ unsigned int *cxcs;
+ unsigned int cxc_count;
+ const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
+#define CLAMP_IO BIT(1)
+#define HW_CTRL BIT(2)
+<<<<<<<
+#define ALWAYS_ON BIT(3)
+=======
+#define SW_RESET BIT(3)
+#define AON_RESET BIT(4)
+#define POLL_CFG_GDSCR BIT(5)
+#define ALWAYS_ON BIT(6)
+>>>>>>>
+ struct reset_controller_dev *rcdev;
+ unsigned int *resets;
+ unsigned int reset_count;
+};
+
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
+#else
+static inline int gdsc_register(struct gdsc_desc *desc,
+ struct reset_controller_dev *rcdev,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
diff --git a/rr-cache/ece77c075cbd2f88b3ef8bcec6fb0533986e8390/preimage b/rr-cache/ece77c075cbd2f88b3ef8bcec6fb0533986e8390/preimage
new file mode 100644
index 0000000..2a4fbcb
--- /dev/null
+++ b/rr-cache/ece77c075cbd2f88b3ef8bcec6fb0533986e8390/preimage
@@ -0,0 +1,40 @@
+QCOM Secure Channel Manager (SCM)
+
+Qualcomm processors include an interface to communicate to the secure firmware.
+This interface allows for clients to request different types of actions. These
+can include CPU power up/down, HDCP requests, loading of firmware, and other
+assorted actions.
+
+Required properties:
+- compatible: must contain one of the following:
+ * "qcom,scm-apq8064" for APQ8064 platforms
+ * "qcom,scm-msm8660" for MSM8660 platforms
+ * "qcom,scm-msm8960" for MSM8960 platforms
+ * "qcom,scm-msm8996" for MSM8996 platforms
+<<<<<<<
+ * "qcom,scm-ipq4019" for IPQ4019 platforms
+ * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+- clocks: One to three clocks may be required based on compatible.
+ * No clock required for "qcom,scm-msm8996", "qcom,scm-ipq4019"
+=======
+ * "qcom,scm-msm8998" for MSM8998 platforms
+ * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+- clocks: One to three clocks may be required based on compatible.
+ * No clock required for "qcom,scm-msm8996" or "qcom,scm-msm8998"
+>>>>>>>
+ * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
+ * Core, iface, and bus clocks required for "qcom,scm"
+- clock-names: Must contain "core" for the core clock, "iface" for the interface
+ clock and "bus" for the bus clock per the requirements of the compatible.
+- qcom,dload-mode: phandle to the TCSR hardware block and offset of the
+ download mode control register (optional)
+
+Example for MSM8916:
+
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CRYPTO_CLK> , <&gcc GCC_CRYPTO_AXI_CLK>, <&gcc GCC_CRYPTO_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
diff --git a/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/postimage b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/postimage
new file mode 100644
index 0000000..01f4321
--- /dev/null
+++ b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/postimage
@@ -0,0 +1,1495 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/soc/qcom/apr.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "q6dsp-errno.h"
+#include "q6core.h"
+#include "q6afe.h"
+
+/* AFE CMDs */
+#define AFE_PORT_CMD_DEVICE_START 0x000100E5
+#define AFE_PORT_CMD_DEVICE_STOP 0x000100E6
+#define AFE_PORT_CMD_SET_PARAM_V2 0x000100EF
+#define AFE_SVC_CMD_SET_PARAM 0x000100f3
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+#define AFE_PARAM_ID_HDMI_CONFIG 0x00010210
+#define AFE_MODULE_AUDIO_DEV_INTERFACE 0x0001020C
+#define AFE_MODULE_TDM 0x0001028A
+
+#define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG 0x00010235
+
+#define AFE_PARAM_ID_LPAIF_CLK_CONFIG 0x00010238
+#define AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG 0x00010239
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG 0x00010212
+#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
+#define AFE_PARAM_ID_TDM_CONFIG 0x0001029D
+#define AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG 0x00010297
+
+/* I2S config specific */
+#define AFE_API_VERSION_I2S_CONFIG 0x1
+#define AFE_PORT_I2S_SD0 0x1
+#define AFE_PORT_I2S_SD1 0x2
+#define AFE_PORT_I2S_SD2 0x3
+#define AFE_PORT_I2S_SD3 0x4
+#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
+#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
+#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
+#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
+#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
+#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
+#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
+#define AFE_PORT_I2S_QUAD01 0x5
+#define AFE_PORT_I2S_QUAD23 0x6
+#define AFE_PORT_I2S_6CHS 0x7
+#define AFE_PORT_I2S_8CHS 0x8
+#define AFE_PORT_I2S_MONO 0x0
+#define AFE_PORT_I2S_STEREO 0x1
+#define AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL 0x0
+#define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL 0x1
+#define AFE_LINEAR_PCM_DATA 0x0
+
+
+/* Port IDs */
+#define AFE_API_VERSION_HDMI_CONFIG 0x1
+#define AFE_PORT_ID_MULTICHAN_HDMI_RX 0x100E
+
+#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
+/* Clock set API version */
+#define AFE_API_VERSION_CLOCK_SET 1
+#define Q6AFE_LPASS_CLK_CONFIG_API_VERSION 0x1
+#define AFE_MODULE_CLOCK_SET 0x0001028F
+#define AFE_PARAM_ID_CLOCK_SET 0x00010290
+
+/* SLIMbus Rx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX 0x4000
+/* SLIMbus Tx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX 0x4001
+/* SLIMbus Rx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX 0x4002
+/* SLIMbus Tx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX 0x4003
+/* SLIMbus Rx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX 0x4004
+/* SLIMbus Tx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX 0x4005
+/* SLIMbus Rx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX 0x4006
+/* SLIMbus Tx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX 0x4007
+/* SLIMbus Rx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX 0x4008
+/* SLIMbus Tx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX 0x4009
+/* SLIMbus Rx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX 0x400a
+/* SLIMbus Tx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX 0x400b
+/* SLIMbus Rx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX 0x400c
+/* SLIMbus Tx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX 0x400d
+#define AFE_PORT_ID_PRIMARY_MI2S_RX 0x1000
+#define AFE_PORT_ID_PRIMARY_MI2S_TX 0x1001
+#define AFE_PORT_ID_SECONDARY_MI2S_RX 0x1002
+#define AFE_PORT_ID_SECONDARY_MI2S_TX 0x1003
+#define AFE_PORT_ID_TERTIARY_MI2S_RX 0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX 0x1005
+#define AFE_PORT_ID_QUATERNARY_MI2S_RX 0x1006
+#define AFE_PORT_ID_QUATERNARY_MI2S_TX 0x1007
+
+/* Start of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_START 0x9000
+
+/* End of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_END \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START+0x50-1)
+
+/* Size of the range of port IDs for TDM ports. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_SIZE \
+ (AFE_PORT_ID_TDM_PORT_RANGE_END - \
+ AFE_PORT_ID_TDM_PORT_RANGE_START+1)
+
+#define AFE_PORT_ID_PRIMARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x00)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_PRIMARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x01)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x10)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x11)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x20)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x21)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x30)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x31)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x40)
+#define AFE_PORT_ID_QUINARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x41)
+#define AFE_PORT_ID_QUINARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0E)
+
+#define Q6AFE_LPASS_MODE_CLK1_VALID 1
+#define Q6AFE_LPASS_MODE_CLK2_VALID 2
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+#define AFE_API_VERSION_TDM_CONFIG 1
+#define AFE_API_VERSION_SLOT_MAPPING_CONFIG 1
+
+#define TIMEOUT_MS 1000
+#define AFE_CMD_RESP_AVAIL 0
+#define AFE_CMD_RESP_NONE 1
+
+struct q6afe {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ struct mutex lock;
+ struct list_head port_list;
+ spinlock_t port_list_lock;
+ struct platform_device *pdev_dais;
+};
+
+struct afe_port_cmd_device_start {
+ u16 port_id;
+ u16 reserved;
+} __packed;
+
+struct afe_port_cmd_device_stop {
+ u16 port_id;
+ u16 reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+} __packed;
+
+struct afe_port_param_data_v2 {
+ u32 module_id;
+ u32 param_id;
+ u16 param_size;
+ u16 reserved;
+} __packed;
+
+struct afe_svc_cmd_set_param {
+ uint32_t payload_size;
+ uint32_t payload_address_lsw;
+ uint32_t payload_address_msw;
+ uint32_t mem_map_handle;
+} __packed;
+
+struct afe_port_cmd_set_param_v2 {
+ u16 port_id;
+ u16 payload_size;
+ u32 payload_address_lsw;
+ u32 payload_address_msw;
+ u32 mem_map_handle;
+} __packed;
+
+struct afe_param_id_hdmi_multi_chan_audio_cfg {
+ u32 hdmi_cfg_minor_version;
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_slimbus_cfg {
+ u32 sb_cfg_minor_version;
+/* Minor version used for tracking the version of the SLIMBUS
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SLIMBUS_CONFIG
+ */
+
+ u16 slimbus_dev_id;
+/* SLIMbus hardware device ID, which is required to handle
+ * multiple SLIMbus hardware blocks.
+ * Supported values: - #AFE_SLIMBUS_DEVICE_1 - #AFE_SLIMBUS_DEVICE_2
+ */
+ u16 bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+ u16 data_format;
+/* Data format supported by the SLIMbus hardware. The default is
+ * 0 (#AFE_SB_DATA_FORMAT_NOT_INDICATED), which indicates the
+ * hardware does not perform any format conversions before the data
+ * transfer.
+ */
+ u16 num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+ u8 shared_ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+/* Mapping of shared channel IDs (128 to 255) to which the
+ * master port is to be connected.
+ * Shared_channel_mapping[i] represents the shared channel assigned
+ * for audio channel i in multichannel audio data.
+ */
+ u32 sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+} __packed;
+
+struct afe_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val1;
+ u32 clk_val2;
+ u16 clk_src;
+ u16 clk_root;
+ u16 clk_set_mode;
+ u16 reserved;
+} __packed;
+
+struct afe_digital_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val;
+ u16 clk_root;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_i2s_cfg {
+ u32 i2s_cfg_minor_version;
+ u16 bit_width;
+ u16 channel_mode;
+ u16 mono_stereo;
+ u16 ws_src;
+ u32 sample_rate;
+ u16 data_format;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_tdm_cfg {
+ u32 tdm_cfg_minor_version;
+ u32 num_channels;
+ u32 sample_rate;
+ u32 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 ctrl_data_out_enable;
+ u16 ctrl_invert_sync_pulse;
+ u16 ctrl_sync_data_delay;
+ u16 slot_width;
+ u32 slot_mask;
+} __packed;
+
+union afe_port_config {
+ struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch;
+ struct afe_param_id_slimbus_cfg slim_cfg;
+ struct afe_param_id_i2s_cfg i2s_cfg;
+ struct afe_param_id_tdm_cfg tdm_cfg;
+} __packed;
+
+
+struct afe_clk_set {
+ uint32_t clk_set_minor_version;
+ uint32_t clk_id;
+ uint32_t clk_freq_in_hz;
+ uint16_t clk_attri;
+ uint16_t clk_root;
+ uint32_t enable;
+};
+
+struct afe_param_id_slot_mapping_cfg {
+ u32 minor_version;
+ u16 num_channels;
+ u16 bitwidth;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+} __packed;
+
+struct q6afe_port {
+ wait_queue_head_t wait;
+ union afe_port_config port_cfg;
+ struct afe_param_id_slot_mapping_cfg *scfg;
+ struct aprv2_ibasic_rsp_result_t result;
+ int token;
+ int id;
+ int cfg_type;
+ struct q6afe *afe;
+ struct kref refcount;
+ struct list_head node;
+};
+
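+/*
+ * One entry per virtual port: the DSP AFE port ID, the virtual index
+ * used as the APR token, whether the port is an Rx (playback) port and
+ * whether it carries digital PCM (1 for every entry in the table below).
+ */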
+struct afe_port_map {
+ int port_id;
+ int token;
+ int is_rx;
+ int is_dig_pcm;
+};
+
+/*
+ * Mapping between virtual port IDs and DSP AFE port IDs.
+ * On B-family SoCs the DSP port IDs are consistent across multiple SoCs;
+ * on A-family SoCs the DSP port IDs are the same as the virtual port IDs.
+ */
+
+static struct afe_port_map port_maps[AFE_PORT_MAX] = {
+ [HDMI_RX] = { AFE_PORT_ID_MULTICHAN_HDMI_RX, HDMI_RX, 1, 1},
+ [SLIMBUS_0_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX,
+ SLIMBUS_0_RX, 1, 1},
+ [SLIMBUS_1_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX,
+ SLIMBUS_1_RX, 1, 1},
+ [SLIMBUS_2_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX,
+ SLIMBUS_2_RX, 1, 1},
+ [SLIMBUS_3_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX,
+ SLIMBUS_3_RX, 1, 1},
+ [SLIMBUS_4_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX,
+ SLIMBUS_4_RX, 1, 1},
+ [SLIMBUS_5_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX,
+ SLIMBUS_5_RX, 1, 1},
+ [SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
+ SLIMBUS_6_RX, 1, 1},
+ [PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
+ PRIMARY_MI2S_RX, 1, 1},
+ [PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
+ PRIMARY_MI2S_TX, 0, 1},
+ [SECONDARY_MI2S_RX] = { AFE_PORT_ID_SECONDARY_MI2S_RX,
+ SECONDARY_MI2S_RX, 1, 1},
+ [SECONDARY_MI2S_TX] = { AFE_PORT_ID_SECONDARY_MI2S_TX,
+ SECONDARY_MI2S_TX, 0, 1},
+ [TERTIARY_MI2S_RX] = { AFE_PORT_ID_TERTIARY_MI2S_RX,
+ TERTIARY_MI2S_RX, 1, 1},
+ [TERTIARY_MI2S_TX] = { AFE_PORT_ID_TERTIARY_MI2S_TX,
+ TERTIARY_MI2S_TX, 0, 1},
+ [QUATERNARY_MI2S_RX] = { AFE_PORT_ID_QUATERNARY_MI2S_RX,
+ QUATERNARY_MI2S_RX, 1, 1},
+ [QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
+ QUATERNARY_MI2S_TX, 0, 1},
+ [PRIMARY_TDM_RX_0] = { AFE_PORT_ID_PRIMARY_TDM_RX,
+ PRIMARY_TDM_RX_0, 1, 1},
+ [PRIMARY_TDM_TX_0] = { AFE_PORT_ID_PRIMARY_TDM_TX,
+ PRIMARY_TDM_TX_0, 0, 1},
+ [PRIMARY_TDM_RX_1] = { AFE_PORT_ID_PRIMARY_TDM_RX_1,
+ PRIMARY_TDM_RX_1, 1, 1},
+ [PRIMARY_TDM_TX_1] = { AFE_PORT_ID_PRIMARY_TDM_TX_1,
+ PRIMARY_TDM_TX_1, 0, 1},
+ [PRIMARY_TDM_RX_2] = { AFE_PORT_ID_PRIMARY_TDM_RX_2,
+ PRIMARY_TDM_RX_2, 1, 1},
+ [PRIMARY_TDM_TX_2] = { AFE_PORT_ID_PRIMARY_TDM_TX_2,
+ PRIMARY_TDM_TX_2, 0, 1},
+ [PRIMARY_TDM_RX_3] = { AFE_PORT_ID_PRIMARY_TDM_RX_3,
+ PRIMARY_TDM_RX_3, 1, 1},
+ [PRIMARY_TDM_TX_3] = { AFE_PORT_ID_PRIMARY_TDM_TX_3,
+ PRIMARY_TDM_TX_3, 0, 1},
+ [PRIMARY_TDM_RX_4] = { AFE_PORT_ID_PRIMARY_TDM_RX_4,
+ PRIMARY_TDM_RX_4, 1, 1},
+ [PRIMARY_TDM_TX_4] = { AFE_PORT_ID_PRIMARY_TDM_TX_4,
+ PRIMARY_TDM_TX_4, 0, 1},
+ [PRIMARY_TDM_RX_5] = { AFE_PORT_ID_PRIMARY_TDM_RX_5,
+ PRIMARY_TDM_RX_5, 1, 1},
+ [PRIMARY_TDM_TX_5] = { AFE_PORT_ID_PRIMARY_TDM_TX_5,
+ PRIMARY_TDM_TX_5, 0, 1},
+ [PRIMARY_TDM_RX_6] = { AFE_PORT_ID_PRIMARY_TDM_RX_6,
+ PRIMARY_TDM_RX_6, 1, 1},
+ [PRIMARY_TDM_TX_6] = { AFE_PORT_ID_PRIMARY_TDM_TX_6,
+ PRIMARY_TDM_TX_6, 0, 1},
+ [PRIMARY_TDM_RX_7] = { AFE_PORT_ID_PRIMARY_TDM_RX_7,
+ PRIMARY_TDM_RX_7, 1, 1},
+ [PRIMARY_TDM_TX_7] = { AFE_PORT_ID_PRIMARY_TDM_TX_7,
+ PRIMARY_TDM_TX_7, 0, 1},
+ [SECONDARY_TDM_RX_0] = { AFE_PORT_ID_SECONDARY_TDM_RX,
+ SECONDARY_TDM_RX_0, 1, 1},
+ [SECONDARY_TDM_TX_0] = { AFE_PORT_ID_SECONDARY_TDM_TX,
+ SECONDARY_TDM_TX_0, 0, 1},
+ [SECONDARY_TDM_RX_1] = { AFE_PORT_ID_SECONDARY_TDM_RX_1,
+ SECONDARY_TDM_RX_1, 1, 1},
+ [SECONDARY_TDM_TX_1] = { AFE_PORT_ID_SECONDARY_TDM_TX_1,
+ SECONDARY_TDM_TX_1, 0, 1},
+ [SECONDARY_TDM_RX_2] = { AFE_PORT_ID_SECONDARY_TDM_RX_2,
+ SECONDARY_TDM_RX_2, 1, 1},
+ [SECONDARY_TDM_TX_2] = { AFE_PORT_ID_SECONDARY_TDM_TX_2,
+ SECONDARY_TDM_TX_2, 0, 1},
+ [SECONDARY_TDM_RX_3] = { AFE_PORT_ID_SECONDARY_TDM_RX_3,
+ SECONDARY_TDM_RX_3, 1, 1},
+ [SECONDARY_TDM_TX_3] = { AFE_PORT_ID_SECONDARY_TDM_TX_3,
+ SECONDARY_TDM_TX_3, 0, 1},
+ [SECONDARY_TDM_RX_4] = { AFE_PORT_ID_SECONDARY_TDM_RX_4,
+ SECONDARY_TDM_RX_4, 1, 1},
+ [SECONDARY_TDM_TX_4] = { AFE_PORT_ID_SECONDARY_TDM_TX_4,
+ SECONDARY_TDM_TX_4, 0, 1},
+ [SECONDARY_TDM_RX_5] = { AFE_PORT_ID_SECONDARY_TDM_RX_5,
+ SECONDARY_TDM_RX_5, 1, 1},
+ [SECONDARY_TDM_TX_5] = { AFE_PORT_ID_SECONDARY_TDM_TX_5,
+ SECONDARY_TDM_TX_5, 0, 1},
+ [SECONDARY_TDM_RX_6] = { AFE_PORT_ID_SECONDARY_TDM_RX_6,
+ SECONDARY_TDM_RX_6, 1, 1},
+ [SECONDARY_TDM_TX_6] = { AFE_PORT_ID_SECONDARY_TDM_TX_6,
+ SECONDARY_TDM_TX_6, 0, 1},
+ [SECONDARY_TDM_RX_7] = { AFE_PORT_ID_SECONDARY_TDM_RX_7,
+ SECONDARY_TDM_RX_7, 1, 1},
+ [SECONDARY_TDM_TX_7] = { AFE_PORT_ID_SECONDARY_TDM_TX_7,
+ SECONDARY_TDM_TX_7, 0, 1},
+ [TERTIARY_TDM_RX_0] = { AFE_PORT_ID_TERTIARY_TDM_RX,
+ TERTIARY_TDM_RX_0, 1, 1},
+ [TERTIARY_TDM_TX_0] = { AFE_PORT_ID_TERTIARY_TDM_TX,
+ TERTIARY_TDM_TX_0, 0, 1},
+ [TERTIARY_TDM_RX_1] = { AFE_PORT_ID_TERTIARY_TDM_RX_1,
+ TERTIARY_TDM_RX_1, 1, 1},
+ [TERTIARY_TDM_TX_1] = { AFE_PORT_ID_TERTIARY_TDM_TX_1,
+ TERTIARY_TDM_TX_1, 0, 1},
+ [TERTIARY_TDM_RX_2] = { AFE_PORT_ID_TERTIARY_TDM_RX_2,
+ TERTIARY_TDM_RX_2, 1, 1},
+ [TERTIARY_TDM_TX_2] = { AFE_PORT_ID_TERTIARY_TDM_TX_2,
+ TERTIARY_TDM_TX_2, 0, 1},
+ [TERTIARY_TDM_RX_3] = { AFE_PORT_ID_TERTIARY_TDM_RX_3,
+ TERTIARY_TDM_RX_3, 1, 1},
+ [TERTIARY_TDM_TX_3] = { AFE_PORT_ID_TERTIARY_TDM_TX_3,
+ TERTIARY_TDM_TX_3, 0, 1},
+ [TERTIARY_TDM_RX_4] = { AFE_PORT_ID_TERTIARY_TDM_RX_4,
+ TERTIARY_TDM_RX_4, 1, 1},
+ [TERTIARY_TDM_TX_4] = { AFE_PORT_ID_TERTIARY_TDM_TX_4,
+ TERTIARY_TDM_TX_4, 0, 1},
+ [TERTIARY_TDM_RX_5] = { AFE_PORT_ID_TERTIARY_TDM_RX_5,
+ TERTIARY_TDM_RX_5, 1, 1},
+ [TERTIARY_TDM_TX_5] = { AFE_PORT_ID_TERTIARY_TDM_TX_5,
+ TERTIARY_TDM_TX_5, 0, 1},
+ [TERTIARY_TDM_RX_6] = { AFE_PORT_ID_TERTIARY_TDM_RX_6,
+ TERTIARY_TDM_RX_6, 1, 1},
+ [TERTIARY_TDM_TX_6] = { AFE_PORT_ID_TERTIARY_TDM_TX_6,
+ TERTIARY_TDM_TX_6, 0, 1},
+ [TERTIARY_TDM_RX_7] = { AFE_PORT_ID_TERTIARY_TDM_RX_7,
+ TERTIARY_TDM_RX_7, 1, 1},
+ [TERTIARY_TDM_TX_7] = { AFE_PORT_ID_TERTIARY_TDM_TX_7,
+ TERTIARY_TDM_TX_7, 0, 1},
+ [QUATERNARY_TDM_RX_0] = { AFE_PORT_ID_QUATERNARY_TDM_RX,
+ QUATERNARY_TDM_RX_0, 1, 1},
+ [QUATERNARY_TDM_TX_0] = { AFE_PORT_ID_QUATERNARY_TDM_TX,
+ QUATERNARY_TDM_TX_0, 0, 1},
+ [QUATERNARY_TDM_RX_1] = { AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+ QUATERNARY_TDM_RX_1, 1, 1},
+ [QUATERNARY_TDM_TX_1] = { AFE_PORT_ID_QUATERNARY_TDM_TX_1,
+ QUATERNARY_TDM_TX_1, 0, 1},
+ [QUATERNARY_TDM_RX_2] = { AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+ QUATERNARY_TDM_RX_2, 1, 1},
+ [QUATERNARY_TDM_TX_2] = { AFE_PORT_ID_QUATERNARY_TDM_TX_2,
+ QUATERNARY_TDM_TX_2, 0, 1},
+ [QUATERNARY_TDM_RX_3] = { AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+ QUATERNARY_TDM_RX_3, 1, 1},
+ [QUATERNARY_TDM_TX_3] = { AFE_PORT_ID_QUATERNARY_TDM_TX_3,
+ QUATERNARY_TDM_TX_3, 0, 1},
+ [QUATERNARY_TDM_RX_4] = { AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+ QUATERNARY_TDM_RX_4, 1, 1},
+ [QUATERNARY_TDM_TX_4] = { AFE_PORT_ID_QUATERNARY_TDM_TX_4,
+ QUATERNARY_TDM_TX_4, 0, 1},
+ [QUATERNARY_TDM_RX_5] = { AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+ QUATERNARY_TDM_RX_5, 1, 1},
+ [QUATERNARY_TDM_TX_5] = { AFE_PORT_ID_QUATERNARY_TDM_TX_5,
+ QUATERNARY_TDM_TX_5, 0, 1},
+ [QUATERNARY_TDM_RX_6] = { AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+ QUATERNARY_TDM_RX_6, 1, 1},
+ [QUATERNARY_TDM_TX_6] = { AFE_PORT_ID_QUATERNARY_TDM_TX_6,
+ QUATERNARY_TDM_TX_6, 0, 1},
+ [QUATERNARY_TDM_RX_7] = { AFE_PORT_ID_QUATERNARY_TDM_RX_7,
+ QUATERNARY_TDM_RX_7, 1, 1},
+ [QUATERNARY_TDM_TX_7] = { AFE_PORT_ID_QUATERNARY_TDM_TX_7,
+ QUATERNARY_TDM_TX_7, 0, 1},
+ [QUINARY_TDM_RX_0] = { AFE_PORT_ID_QUINARY_TDM_RX,
+ QUINARY_TDM_RX_0, 1, 1},
+ [QUINARY_TDM_TX_0] = { AFE_PORT_ID_QUINARY_TDM_TX,
+ QUINARY_TDM_TX_0, 0, 1},
+ [QUINARY_TDM_RX_1] = { AFE_PORT_ID_QUINARY_TDM_RX_1,
+ QUINARY_TDM_RX_1, 1, 1},
+ [QUINARY_TDM_TX_1] = { AFE_PORT_ID_QUINARY_TDM_TX_1,
+ QUINARY_TDM_TX_1, 0, 1},
+ [QUINARY_TDM_RX_2] = { AFE_PORT_ID_QUINARY_TDM_RX_2,
+ QUINARY_TDM_RX_2, 1, 1},
+ [QUINARY_TDM_TX_2] = { AFE_PORT_ID_QUINARY_TDM_TX_2,
+ QUINARY_TDM_TX_2, 0, 1},
+ [QUINARY_TDM_RX_3] = { AFE_PORT_ID_QUINARY_TDM_RX_3,
+ QUINARY_TDM_RX_3, 1, 1},
+ [QUINARY_TDM_TX_3] = { AFE_PORT_ID_QUINARY_TDM_TX_3,
+ QUINARY_TDM_TX_3, 0, 1},
+ [QUINARY_TDM_RX_4] = { AFE_PORT_ID_QUINARY_TDM_RX_4,
+ QUINARY_TDM_RX_4, 1, 1},
+ [QUINARY_TDM_TX_4] = { AFE_PORT_ID_QUINARY_TDM_TX_4,
+ QUINARY_TDM_TX_4, 0, 1},
+ [QUINARY_TDM_RX_5] = { AFE_PORT_ID_QUINARY_TDM_RX_5,
+ QUINARY_TDM_RX_5, 1, 1},
+ [QUINARY_TDM_TX_5] = { AFE_PORT_ID_QUINARY_TDM_TX_5,
+ QUINARY_TDM_TX_5, 0, 1},
+ [QUINARY_TDM_RX_6] = { AFE_PORT_ID_QUINARY_TDM_RX_6,
+ QUINARY_TDM_RX_6, 1, 1},
+ [QUINARY_TDM_TX_6] = { AFE_PORT_ID_QUINARY_TDM_TX_6,
+ QUINARY_TDM_TX_6, 0, 1},
+ [QUINARY_TDM_RX_7] = { AFE_PORT_ID_QUINARY_TDM_RX_7,
+ QUINARY_TDM_RX_7, 1, 1},
+ [QUINARY_TDM_TX_7] = { AFE_PORT_ID_QUINARY_TDM_TX_7,
+ QUINARY_TDM_TX_7, 0, 1},
+};
+
+static void q6afe_port_free(struct kref *ref)
+{
+ struct q6afe_port *port;
+ struct q6afe *afe;
+ unsigned long flags;
+
+ port = container_of(ref, struct q6afe_port, refcount);
+ afe = port->afe;
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_del(&port->node);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ kfree(port->scfg);
+ kfree(port);
+}
+
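+/*
+ * Look up an opened port by its APR token and take a reference on it;
+ * the caller must drop the reference with kref_put(&port->refcount,
+ * q6afe_port_free) once done.
+ */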
+static struct q6afe_port *q6afe_find_port(struct q6afe *afe, int token)
+{
+ struct q6afe_port *p = NULL;
+ struct q6afe_port *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_for_each_entry(p, &afe->port_list, node)
+ if (p->token == token) {
+ ret = p;
+ kref_get(&p->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ return ret;
+}
+
+static int q6afe_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *res;
+ struct apr_hdr *hdr = &data->hdr;
+ struct q6afe_port *port;
+
+ if (!data->payload_size)
+ return 0;
+
+ res = data->payload;
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT: {
+ if (res->status) {
+ dev_err(afe->dev, "cmd = 0x%x returned error = 0x%x\n",
+ res->opcode, res->status);
+ }
+ switch (res->opcode) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ case AFE_SVC_CMD_SET_PARAM:
+ port = q6afe_find_port(afe, hdr->token);
+ if (port) {
+ port->result = *res;
+ wake_up(&port->wait);
+ kref_put(&port->refcount, q6afe_port_free);
+ }
+ break;
+ default:
+ dev_err(afe->dev, "Unknown cmd 0x%x\n", res->opcode);
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * q6afe_get_port_id() - Get port id from a given port index
+ *
+ * @index: port index
+ *
+ * Return: Will be a negative value on error or a valid port_id on success
+ */
+int q6afe_get_port_id(int index)
+{
+ if (index < 0 || index >= AFE_PORT_MAX)
+ return -EINVAL;
+
+ return port_maps[index].port_id;
+}
+EXPORT_SYMBOL_GPL(q6afe_get_port_id);
+
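+/*
+ * Send an APR packet to the AFE service and wait for the matching
+ * basic response (delivered via q6afe_callback) or a TIMEOUT_MS
+ * timeout; afe->lock serializes concurrent callers.
+ */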
+static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
+ struct q6afe_port *port)
+{
+ wait_queue_head_t *wait = &port->wait;
+ struct apr_hdr *hdr = &pkt->hdr;
+ int ret;
+
+ mutex_lock(&afe->lock);
+ port->result.opcode = 0;
+ port->result.status = 0;
+
+ ret = apr_send_pkt(afe->apr, pkt);
+ if (ret < 0) {
+ dev_err(afe->dev, "packet not transmitted (%d)\n", ret);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wait_event_timeout(*wait, (port->result.opcode == hdr->opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (port->result.status > 0) {
+ dev_err(afe->dev, "DSP returned error[%x]\n",
+ port->result.status);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+err:
+ mutex_unlock(&afe->lock);
+
+ return ret;
+}
+
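+/*
+ * Send a module parameter in-band through AFE_SVC_CMD_SET_PARAM: the
+ * payload address and mem_map_handle stay zero, so the parameter data
+ * travels inside the packet itself.
+ */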
+static int q6afe_port_set_param(struct q6afe_port *port, void *data,
+ int param_id, int module_id, int psize)
+{
+ struct afe_svc_cmd_set_param *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+ kfree(pkt);
+ return ret;
+}
+
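+/*
+ * Same as q6afe_port_set_param() but targets a single port through
+ * AFE_PORT_CMD_SET_PARAM_V2, carrying the destination port_id in the
+ * command payload.
+ */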
+static int q6afe_port_set_param_v2(struct q6afe_port *port, void *data,
+ int param_id, int module_id, int psize)
+{
+ struct afe_port_cmd_set_param_v2 *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+ param->port_id = port_id;
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+ kfree(pkt);
+ return ret;
+}
+
+static int q6afe_set_lpass_clock(struct q6afe_port *port,
+ struct afe_clk_cfg *cfg)
+{
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_LPAIF_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+}
+
+static int q6afe_set_lpass_clock_v2(struct q6afe_port *port,
+ struct afe_clk_set *cfg)
+{
+ return q6afe_port_set_param(port, cfg, AFE_PARAM_ID_CLOCK_SET,
+ AFE_MODULE_CLOCK_SET, sizeof(*cfg));
+}
+
+static int q6afe_set_digital_codec_core_clock(struct q6afe_port *port,
+ struct afe_digital_clk_cfg *cfg)
+{
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+}
+
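+/*
+ * Pick the right clock interface for the requested clk_id: the legacy
+ * LPAIF_* IDs go through the LPAIF/digital-codec clock params, while
+ * the newer Q6AFE_LPASS_CLK_ID_* ranges use AFE_PARAM_ID_CLOCK_SET.
+ */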
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir)
+{
+ struct afe_clk_cfg ccfg = {0,};
+ struct afe_clk_set cset = {0,};
+ struct afe_digital_clk_cfg dcfg = {0,};
+ int ret;
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ dcfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ dcfg.clk_val = freq;
+ dcfg.clk_root = clk_root;
+ ret = q6afe_set_digital_codec_core_clock(port, &dcfg);
+ break;
+ case LPAIF_BIT_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val1 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK1_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+
+ case LPAIF_OSR_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val2 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK2_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
+ cset.clk_set_minor_version = AFE_API_VERSION_CLOCK_SET;
+ cset.clk_id = clk_id;
+ cset.clk_freq_in_hz = freq;
+ cset.clk_attri = clk_src;
+ cset.clk_root = clk_root;
+ cset.enable = !!freq;
+ ret = q6afe_set_lpass_clock_v2(port, &cset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_set_sysclk);
+
+/**
+ * q6afe_port_stop() - Stop an AFE port
+ *
+ * @port: Instance of port to stop
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6afe_port_stop(struct q6afe_port *port)
+{
+ struct afe_port_cmd_device_stop *stop;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ int port_id = port->id;
+ int ret = 0;
+ int index, pkt_size;
+ void *p;
+
+ port_id = port->id;
+ index = port->token;
+ if (index < 0 || index >= AFE_PORT_MAX) {
+ dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
+ return -EINVAL;
+ }
+
+ pkt_size = APR_HDR_SIZE + sizeof(*stop);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ stop = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = index;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop->port_id = port_id;
+ stop->reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_stop);
+
+/**
+ * q6afe_slim_port_prepare() - Prepare a SLIMbus AFE port.
+ *
+ * @port: Instance of afe port
+ * @cfg: SLIM configuration for the afe port
+ *
+ */
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->slim_cfg.sb_cfg_minor_version = AFE_API_VERSION_SLIMBUS_CONFIG;
+ pcfg->slim_cfg.sample_rate = cfg->sample_rate;
+ pcfg->slim_cfg.bit_width = cfg->bit_width;
+ pcfg->slim_cfg.num_channels = cfg->num_channels;
+ pcfg->slim_cfg.data_format = cfg->data_format;
+ pcfg->slim_cfg.shared_ch_mapping[0] = cfg->ch_mapping[0];
+ pcfg->slim_cfg.shared_ch_mapping[1] = cfg->ch_mapping[1];
+ pcfg->slim_cfg.shared_ch_mapping[2] = cfg->ch_mapping[2];
+ pcfg->slim_cfg.shared_ch_mapping[3] = cfg->ch_mapping[3];
+
+}
+EXPORT_SYMBOL_GPL(q6afe_slim_port_prepare);
+
+/**
+ * q6afe_tdm_port_prepare() - Prepare a TDM AFE port.
+ *
+ * @port: Instance of afe port
+ * @cfg: TDM configuration for the afe port
+ *
+ */
+void q6afe_tdm_port_prepare(struct q6afe_port *port,
+ struct q6afe_tdm_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->tdm_cfg.tdm_cfg_minor_version = AFE_API_VERSION_TDM_CONFIG;
+ pcfg->tdm_cfg.num_channels = cfg->num_channels;
+ pcfg->tdm_cfg.sample_rate = cfg->sample_rate;
+ pcfg->tdm_cfg.bit_width = cfg->bit_width;
+ pcfg->tdm_cfg.data_format = cfg->data_format;
+ pcfg->tdm_cfg.sync_mode = cfg->sync_mode;
+ pcfg->tdm_cfg.sync_src = cfg->sync_src;
+ pcfg->tdm_cfg.nslots_per_frame = cfg->nslots_per_frame;
+
+ pcfg->tdm_cfg.slot_width = cfg->slot_width;
+ pcfg->tdm_cfg.slot_mask = cfg->slot_mask;
+ port->scfg = kzalloc(sizeof(*port->scfg), GFP_KERNEL);
+ if (!port->scfg)
+ return;
+
+ port->scfg->minor_version = AFE_API_VERSION_SLOT_MAPPING_CONFIG;
+ port->scfg->num_channels = cfg->num_channels;
+ port->scfg->bitwidth = cfg->bit_width;
+ port->scfg->data_align_type = cfg->data_align_type;
+ memcpy(port->scfg->ch_mapping, cfg->ch_mapping,
+ sizeof(u16) * AFE_PORT_MAX_AUDIO_CHAN_CNT);
+}
+EXPORT_SYMBOL_GPL(q6afe_tdm_port_prepare);
+
+/**
+ * q6afe_hdmi_port_prepare() - Prepare an HDMI AFE port.
+ *
+ * @port: Instance of afe port
+ * @cfg: HDMI configuration for the afe port
+ *
+ */
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->hdmi_multi_ch.hdmi_cfg_minor_version =
+ AFE_API_VERSION_HDMI_CONFIG;
+ pcfg->hdmi_multi_ch.datatype = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+ pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+}
+EXPORT_SYMBOL_GPL(q6afe_hdmi_port_prepare);
+
+/**
+ * q6afe_i2s_port_prepare() - Prepare an I2S AFE port.
+ *
+ * @port: Instance of afe port
+ * @cfg: I2S configuration for the afe port
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+ struct device *dev = port->afe->dev;
+ int num_sd_lines;
+
+ pcfg->i2s_cfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ pcfg->i2s_cfg.sample_rate = cfg->sample_rate;
+ pcfg->i2s_cfg.bit_width = cfg->bit_width;
+ pcfg->i2s_cfg.data_format = AFE_LINEAR_PCM_DATA;
+
+ switch (cfg->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* CPU is slave */
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL;
+ break;
+ default:
+ break;
+ }
+
+ num_sd_lines = hweight_long(cfg->sd_line_mask);
+
+ switch (num_sd_lines) {
+ case 0:
+ dev_err(dev, "no line is assigned\n");
+ return -EINVAL;
+ case 1:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_SD1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD1;
+ break;
+ case AFE_PORT_I2S_SD2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ case AFE_PORT_I2S_SD3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD3;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD01;
+ break;
+ case AFE_PORT_I2S_SD2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD23;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 3:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_6CHS;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_8CHS;
+
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+
+ switch (cfg->num_channels) {
+ case 1:
+ case 2:
+ switch (pcfg->i2s_cfg.channel_mode) {
+ case AFE_PORT_I2S_QUAD01:
+ case AFE_PORT_I2S_6CHS:
+ case AFE_PORT_I2S_8CHS:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_QUAD23:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ }
+
+ if (cfg->num_channels == 2)
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_STEREO;
+ else
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_MONO;
+
+ break;
+ case 3:
+ case 4:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_QUAD01) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 5:
+ case 6:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_6CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 7:
+ case 8:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_8CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(q6afe_i2s_port_prepare);
+
+/**
+ * q6afe_port_start() - Start an AFE port
+ *
+ * @port: Instance of port to start
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6afe_port_start(struct q6afe_port *port)
+{
+ struct afe_port_cmd_device_start *start;
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ int ret, param_id = port->cfg_type;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ void *p;
+
+ ret = q6afe_port_set_param_v2(port, &port->port_cfg, param_id,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(port->port_cfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+
+ if (port->scfg) {
+ ret = q6afe_port_set_param_v2(port, port->scfg,
+ AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG,
+ AFE_MODULE_TDM, sizeof(*port->scfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+ }
+
+ pkt_size = APR_HDR_SIZE + sizeof(*start);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ start = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+
+ start->port_id = port_id;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_start);
+
+/**
+ * q6afe_port_get_from_id() - Get port instance from a port id
+ *
+ * @dev: Pointer to afe child device.
+ * @id: port id
+ *
+ * Return: Will be an error pointer on error or a valid afe port
+ * on success.
+ */
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
+{
+ int port_id;
+ struct q6afe *afe = dev_get_drvdata(dev->parent);
+ struct q6afe_port *port;
+ unsigned long flags;
+ int cfg_type;
+
+ if (id < 0 || id >= AFE_PORT_MAX) {
+ dev_err(dev, "AFE port token[%d] invalid!\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Handle the case where the port is bound/unbound multiple times
+ * before the callback finishes.
+ */
+ port = q6afe_find_port(afe, id);
+ if (port) {
+ dev_err(dev, "AFE Port already open\n");
+ return port;
+ }
+
+ port_id = port_maps[id].port_id;
+
+ switch (port_id) {
+ case AFE_PORT_ID_MULTICHAN_HDMI_RX:
+ cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+ break;
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX:
+ cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+ break;
+
+ case AFE_PORT_ID_PRIMARY_MI2S_RX:
+ case AFE_PORT_ID_PRIMARY_MI2S_TX:
+ case AFE_PORT_ID_SECONDARY_MI2S_RX:
+ case AFE_PORT_ID_SECONDARY_MI2S_TX:
+ case AFE_PORT_ID_TERTIARY_MI2S_RX:
+ case AFE_PORT_ID_TERTIARY_MI2S_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX ... AFE_PORT_ID_QUINARY_TDM_TX_7:
+ cfg_type = AFE_PARAM_ID_TDM_CONFIG;
+ break;
+
+ default:
+ dev_err(dev, "Invalid port id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&port->wait);
+
+ port->token = id;
+ port->id = port_id;
+ port->afe = afe;
+ port->cfg_type = cfg_type;
+ kref_init(&port->refcount);
+
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+
+ return port;
+
+}
+EXPORT_SYMBOL_GPL(q6afe_port_get_from_id);
+
+/**
+ * q6afe_port_put() - Release port reference
+ *
+ * @port: Instance of port to put
+ */
+void q6afe_port_put(struct q6afe_port *port)
+{
+ kref_put(&port->refcount, q6afe_port_free);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_put);
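+
+/*
+ * Illustrative call sequence for a client DAI driver (sketch only, not
+ * part of this driver; "hdmi_cfg" is a caller-provided struct
+ * q6afe_hdmi_cfg and error handling is omitted):
+ *
+ *	port = q6afe_port_get_from_id(dev, HDMI_RX);
+ *	q6afe_hdmi_port_prepare(port, &hdmi_cfg);
+ *	q6afe_port_start(port);
+ *	...
+ *	q6afe_port_stop(port);
+ *	q6afe_port_put(port);
+ */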
+
+static int q6afe_probe(struct apr_device *adev)
+{
+ struct q6afe *afe;
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+
+ q6core_get_svc_api_info(adev->svc_id, &afe->ainfo);
+ afe->apr = adev;
+ mutex_init(&afe->lock);
+ afe->dev = dev;
+ INIT_LIST_HEAD(&afe->port_list);
+ spin_lock_init(&afe->port_list_lock);
+
+ dev_set_drvdata(dev, afe);
+
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ afe->pdev_dais = of_platform_device_create(dais_np,
+ "q6afe-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6afe_remove(struct apr_device *adev)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+
+ if (afe->pdev_dais)
+ of_platform_device_destroy(&afe->pdev_dais->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id q6afe_device_id[] = {
+ { .compatible = "qcom,q6afe" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6afe_device_id);
+
+static struct apr_driver qcom_q6afe_driver = {
+ .probe = q6afe_probe,
+ .remove = q6afe_remove,
+ .callback = q6afe_callback,
+ .driver = {
+ .name = "qcom-q6afe",
+ .of_match_table = of_match_ptr(q6afe_device_id),
+
+ },
+};
+
+module_apr_driver(qcom_q6afe_driver);
+MODULE_DESCRIPTION("Q6 Audio Front End");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/preimage b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/preimage
new file mode 100644
index 0000000..3ce8b16
--- /dev/null
+++ b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/preimage
@@ -0,0 +1,2236 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+<<<<<<<
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+=======
+#include <linux/of.h>
+>>>>>>>
+#include <linux/delay.h>
+#include <linux/soc/qcom/apr.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "q6dsp-errno.h"
+#include "q6core.h"
+#include "q6afe.h"
+
+/* AFE CMDs */
+#define AFE_PORT_CMD_DEVICE_START 0x000100E5
+#define AFE_PORT_CMD_DEVICE_STOP 0x000100E6
+#define AFE_PORT_CMD_SET_PARAM_V2 0x000100EF
+<<<<<<<
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+#define AFE_PARAM_ID_HDMI_CONFIG 0x00010210
+#define AFE_MODULE_AUDIO_DEV_INTERFACE 0x0001020C
+=======
+#define AFE_SVC_CMD_SET_PARAM 0x000100f3
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+#define AFE_PARAM_ID_HDMI_CONFIG 0x00010210
+#define AFE_MODULE_AUDIO_DEV_INTERFACE 0x0001020C
+#define AFE_MODULE_TDM 0x0001028A
+>>>>>>>
+
+#define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG 0x00010235
+
+#define AFE_PARAM_ID_LPAIF_CLK_CONFIG 0x00010238
+<<<<<<<
+#define AFE_PARAM_ID_INTERNAL_DIGITAL_CDC_CLK_CONFIG 0x00010239
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG 0x00010212
+#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
+=======
+#define AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG 0x00010239
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG 0x00010212
+#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
+#define AFE_PARAM_ID_TDM_CONFIG 0x0001029D
+#define AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG 0x00010297
+>>>>>>>
+
+/* I2S config specific */
+#define AFE_API_VERSION_I2S_CONFIG 0x1
+#define AFE_PORT_I2S_SD0 0x1
+#define AFE_PORT_I2S_SD1 0x2
+#define AFE_PORT_I2S_SD2 0x3
+#define AFE_PORT_I2S_SD3 0x4
+<<<<<<<
+=======
+#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
+#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
+#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
+#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
+#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
+#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
+#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
+>>>>>>>
+#define AFE_PORT_I2S_QUAD01 0x5
+#define AFE_PORT_I2S_QUAD23 0x6
+#define AFE_PORT_I2S_6CHS 0x7
+#define AFE_PORT_I2S_8CHS 0x8
+#define AFE_PORT_I2S_MONO 0x0
+#define AFE_PORT_I2S_STEREO 0x1
+#define AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL 0x0
+#define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL 0x1
+#define AFE_LINEAR_PCM_DATA 0x0
+
+
+/* Port IDs */
+#define AFE_API_VERSION_HDMI_CONFIG 0x1
+#define AFE_PORT_ID_MULTICHAN_HDMI_RX 0x100E
+
+#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
+<<<<<<<
+=======
+/* Clock set API version */
+#define AFE_API_VERSION_CLOCK_SET 1
+#define Q6AFE_LPASS_CLK_CONFIG_API_VERSION 0x1
+#define AFE_MODULE_CLOCK_SET 0x0001028F
+#define AFE_PARAM_ID_CLOCK_SET 0x00010290
+>>>>>>>
+
+/* SLIMbus Rx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX 0x4000
+/* SLIMbus Tx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX 0x4001
+/* SLIMbus Rx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX 0x4002
+/* SLIMbus Tx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX 0x4003
+/* SLIMbus Rx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX 0x4004
+/* SLIMbus Tx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX 0x4005
+/* SLIMbus Rx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX 0x4006
+/* SLIMbus Tx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX 0x4007
+/* SLIMbus Rx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX 0x4008
+/* SLIMbus Tx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX 0x4009
+/* SLIMbus Rx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX 0x400a
+/* SLIMbus Tx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX 0x400b
+/* SLIMbus Rx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX 0x400c
+/* SLIMbus Tx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX 0x400d
+#define AFE_PORT_ID_PRIMARY_MI2S_RX 0x1000
+#define AFE_PORT_ID_PRIMARY_MI2S_TX 0x1001
+#define AFE_PORT_ID_SECONDARY_MI2S_RX 0x1002
+#define AFE_PORT_ID_SECONDARY_MI2S_TX 0x1003
+#define AFE_PORT_ID_TERTIARY_MI2S_RX 0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX 0x1005
+#define AFE_PORT_ID_QUATERNARY_MI2S_RX 0x1006
+#define AFE_PORT_ID_QUATERNARY_MI2S_TX 0x1007
+
+<<<<<<<
+=======
+/* Start of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_START 0x9000
+
+/* End of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_END \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START+0x50-1)
+
+/* Size of the range of port IDs for TDM ports. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_SIZE \
+ (AFE_PORT_ID_TDM_PORT_RANGE_END - \
+ AFE_PORT_ID_TDM_PORT_RANGE_START+1)
+
+#define AFE_PORT_ID_PRIMARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x00)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_PRIMARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x01)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x10)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x11)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x20)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x21)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x30)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x31)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x40)
+#define AFE_PORT_ID_QUINARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x41)
+#define AFE_PORT_ID_QUINARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0E)
+
+>>>>>>>
+#define Q6AFE_LPASS_MODE_CLK1_VALID 1
+#define Q6AFE_LPASS_MODE_CLK2_VALID 2
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+<<<<<<<
+=======
+#define AFE_API_VERSION_TDM_CONFIG 1
+#define AFE_API_VERSION_SLOT_MAPPING_CONFIG 1
+>>>>>>>
+
+#define TIMEOUT_MS 1000
+#define AFE_CMD_RESP_AVAIL 0
+#define AFE_CMD_RESP_NONE 1
+
+<<<<<<<
+/* v1 Specific */
+#define AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG 0x000100D9
+#define AFE_PORT_CMD_START 0x000100ca
+#define AFE_PORT_CMD_STOP 0x000100cb
+
+struct q6afe_ops {
+ int (*port_stop)(struct q6afe_port *port);
+ int (*port_start)(struct q6afe_port *port);
+ struct q6afe_port * (*port_get_from_id)(struct device *dev, int id);
+ void (*hdmi_prepare) (struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+};
+
+struct q6afe {
+ struct apr_device *apr;
+ struct device *dev;
+ int state;
+ int status;
+
+ struct mutex lock;
+ struct list_head port_list;
+ spinlock_t port_list_lock;
+ struct list_head node;
+ void *dai_data;
+ struct q6afe_ops *ops;
+};
+
+struct afe_port_start_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 gain; /* Q13 */
+ u32 sample_rate; /* 8 , 16, 48khz */
+} __packed;
+
+
+struct afe_port_cmd_device_start {
+ struct apr_hdr hdr;
+=======
+struct q6afe {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ struct mutex lock;
+ struct list_head port_list;
+ spinlock_t port_list_lock;
+ struct platform_device *pdev_dais;
+};
+
+struct afe_port_cmd_device_start {
+>>>>>>>
+ u16 port_id;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+struct afe_port_cmd_device_stop {
+=======
+struct afe_port_stop_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+struct afe_port_cmd_device_stop {
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 port_id;
+ u16 reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+} __packed;
+
+struct afe_port_param_data_v2 {
+ u32 module_id;
+ u32 param_id;
+ u16 param_size;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+=======
+struct afe_svc_cmd_set_param {
+ uint32_t payload_size;
+ uint32_t payload_address_lsw;
+ uint32_t payload_address_msw;
+ uint32_t mem_map_handle;
+} __packed;
+
+>>>>>>>
+struct afe_port_cmd_set_param_v2 {
+ u16 port_id;
+ u16 payload_size;
+ u32 payload_address_lsw;
+ u32 payload_address_msw;
+ u32 mem_map_handle;
+} __packed;
+
+struct afe_param_id_hdmi_multi_chan_audio_cfg {
+ u32 hdmi_cfg_minor_version;
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_slimbus_cfg {
+ u32 sb_cfg_minor_version;
+/* Minor version used for tracking the version of the SLIMBUS
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SLIMBUS_CONFIG
+ */
+
+ u16 slimbus_dev_id;
+/* SLIMbus hardware device ID, which is required to handle
+ * multiple SLIMbus hardware blocks.
+ * Supported values: - #AFE_SLIMBUS_DEVICE_1 - #AFE_SLIMBUS_DEVICE_2
+ */
+ u16 bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+ u16 data_format;
+/* Data format supported by the SLIMbus hardware. The default is
+ * 0 (#AFE_SB_DATA_FORMAT_NOT_INDICATED), which indicates the
+ * hardware does not perform any format conversions before the data
+ * transfer.
+ */
+ u16 num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+ u8 shared_ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+/* Mapping of shared channel IDs (128 to 255) to which the
+ * master port is to be connected.
+ * Shared_channel_mapping[i] represents the shared channel assigned
+ * for audio channel i in multichannel audio data.
+ */
+ u32 sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+} __packed;
+
+struct afe_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val1;
+ u32 clk_val2;
+ u16 clk_src;
+ u16 clk_root;
+ u16 clk_set_mode;
+ u16 reserved;
+} __packed;
+
+struct afe_digital_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val;
+ u16 clk_root;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_i2s_cfg {
+ u32 i2s_cfg_minor_version;
+ u16 bit_width;
+ u16 channel_mode;
+ u16 mono_stereo;
+ u16 ws_src;
+ u32 sample_rate;
+ u16 data_format;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+struct afe_param_id_tdm_cfg {
+ u32 tdm_cfg_minor_version;
+ u32 num_channels;
+ u32 sample_rate;
+ u32 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 ctrl_data_out_enable;
+ u16 ctrl_invert_sync_pulse;
+ u16 ctrl_sync_data_delay;
+ u16 slot_width;
+ u32 slot_mask;
+} __packed;
+
+=======
+struct afe_port_hdmi_multi_ch_cfg {
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+ u16 channel_allocation; /* The default is 0 (Stereo) */
+ u16 reserved; /* must be set to 0 */
+} __packed;
+
+
+
+struct afe_port_hdmi_cfg {
+ u16 bitwidth; /* 16,24,32 */
+ u16 channel_mode; /* HDMI Stereo = 0 */
+ /* HDMI_3Point1 (4-ch) = 1 */
+ /* HDMI_5Point1 (6-ch) = 2 */
+ /* HDMI_6Point1 (8-ch) = 3 */
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+} __packed;
+
+union afe_port_config_v1 {
+ struct afe_port_hdmi_multi_ch_cfg hdmi_multi_ch;
+};
+
+>>>>>>>
+union afe_port_config {
+ struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch;
+ struct afe_param_id_slimbus_cfg slim_cfg;
+ struct afe_param_id_i2s_cfg i2s_cfg;
+<<<<<<<
+ struct afe_param_id_tdm_cfg tdm_cfg;
+} __packed;
+
+
+struct afe_clk_set {
+ uint32_t clk_set_minor_version;
+ uint32_t clk_id;
+ uint32_t clk_freq_in_hz;
+ uint16_t clk_attri;
+ uint16_t clk_root;
+ uint32_t enable;
+};
+
+struct afe_param_id_slot_mapping_cfg {
+ u32 minor_version;
+ u16 num_channels;
+ u16 bitwidth;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+} __packed;
+
+struct q6afe_port {
+ wait_queue_head_t wait;
+ union afe_port_config port_cfg;
+ struct afe_param_id_slot_mapping_cfg *scfg;
+ struct aprv2_ibasic_rsp_result_t result;
+ int token;
+ int id;
+ int cfg_type;
+ struct q6afe *afe;
+ struct kref refcount;
+ struct list_head node;
+};
+
+=======
+} __packed;
+
+struct afe_lpass_clk_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_clk_cfg clk_cfg;
+} __packed;
+
+struct afe_lpass_digital_clk_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_digital_clk_cfg clk_cfg;
+} __packed;
+
+/* This param id is used to configure internal clk */
+struct q6afe_port {
+ wait_queue_head_t wait;
+ union afe_port_config port_cfg;
+ union afe_port_config_v1 port_cfgv1;
+ int token;
+ int id;
+ int cfg_type;
+ int rate;
+ struct q6afe *afe;
+ struct list_head node;
+};
+
+struct afe_audioif_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ union afe_port_config port;
+} __packed;
+
+
+struct afe_audioif_config_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ union afe_port_config_v1 port;
+} __packed;
+
+>>>>>>>
+struct afe_port_map {
+ int port_id;
+ int token;
+ int is_rx;
+ int is_dig_pcm;
+};
+
+<<<<<<<
+/*
+ * Mapping between Virtual Port IDs to DSP AFE Port ID
+ * On B Family SoCs DSP Port IDs are consistent across multiple SoCs
+ * on A Family SoCs DSP port IDs are same as virtual Port IDs.
+ */
+
+static struct afe_port_map port_maps[AFE_PORT_MAX] = {
+ [HDMI_RX] = { AFE_PORT_ID_MULTICHAN_HDMI_RX, HDMI_RX, 1, 1},
+=======
+/* Port map of index vs real hw port ids */
+static struct afe_port_map port_maps[AFE_PORT_MAX] = {
+ [AFE_PORT_HDMI_RX] = { AFE_PORT_ID_MULTICHAN_HDMI_RX,
+ AFE_PORT_HDMI_RX, 1, 1},
+>>>>>>>
+ [SLIMBUS_0_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX,
+ SLIMBUS_0_RX, 1, 1},
+ [SLIMBUS_1_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX,
+ SLIMBUS_1_RX, 1, 1},
+ [SLIMBUS_2_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX,
+ SLIMBUS_2_RX, 1, 1},
+ [SLIMBUS_3_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX,
+ SLIMBUS_3_RX, 1, 1},
+ [SLIMBUS_4_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX,
+ SLIMBUS_4_RX, 1, 1},
+ [SLIMBUS_5_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX,
+ SLIMBUS_5_RX, 1, 1},
+<<<<<<<
+ [QUATERNARY_MI2S_RX] = { AFE_PORT_ID_QUATERNARY_MI2S_RX,
+ QUATERNARY_MI2S_RX, 1, 1},
+ [QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
+ QUATERNARY_MI2S_TX, 0, 1},
+=======
+ [SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
+ SLIMBUS_6_RX, 1, 1},
+ [PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
+ PRIMARY_MI2S_RX, 1, 1},
+ [PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
+ PRIMARY_MI2S_RX, 0, 1},
+>>>>>>>
+ [SECONDARY_MI2S_RX] = { AFE_PORT_ID_SECONDARY_MI2S_RX,
+ SECONDARY_MI2S_RX, 1, 1},
+ [SECONDARY_MI2S_TX] = { AFE_PORT_ID_SECONDARY_MI2S_TX,
+ SECONDARY_MI2S_TX, 0, 1},
+ [TERTIARY_MI2S_RX] = { AFE_PORT_ID_TERTIARY_MI2S_RX,
+ TERTIARY_MI2S_RX, 1, 1},
+ [TERTIARY_MI2S_TX] = { AFE_PORT_ID_TERTIARY_MI2S_TX,
+ TERTIARY_MI2S_TX, 0, 1},
+<<<<<<<
+ [PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
+ PRIMARY_MI2S_RX, 1, 1},
+ [PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
+ PRIMARY_MI2S_RX, 0, 1},
+ [SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
+ SLIMBUS_6_RX, 1, 1},
+};
+
+static struct q6afe_port *afe_find_port(struct q6afe *afe, int token)
+{
+ struct q6afe_port *p = NULL;
+
+ spin_lock(&afe->port_list_lock);
+ list_for_each_entry(p, &afe->port_list, node)
+ if (p->token == token)
+ break;
+
+ spin_unlock(&afe->port_list_lock);
+ return p;
+}
+
+static int afe_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *res;
+=======
+ [QUATERNARY_MI2S_RX] = { AFE_PORT_ID_QUATERNARY_MI2S_RX,
+ QUATERNARY_MI2S_RX, 1, 1},
+ [QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
+ QUATERNARY_MI2S_TX, 0, 1},
+ [PRIMARY_TDM_RX_0] = { AFE_PORT_ID_PRIMARY_TDM_RX,
+ PRIMARY_TDM_RX_0, 1, 1},
+ [PRIMARY_TDM_TX_0] = { AFE_PORT_ID_PRIMARY_TDM_TX,
+ PRIMARY_TDM_TX_0, 0, 1},
+ [PRIMARY_TDM_RX_1] = { AFE_PORT_ID_PRIMARY_TDM_RX_1,
+ PRIMARY_TDM_RX_1, 1, 1},
+ [PRIMARY_TDM_TX_1] = { AFE_PORT_ID_PRIMARY_TDM_TX_1,
+ PRIMARY_TDM_TX_1, 0, 1},
+ [PRIMARY_TDM_RX_2] = { AFE_PORT_ID_PRIMARY_TDM_RX_2,
+ PRIMARY_TDM_RX_2, 1, 1},
+ [PRIMARY_TDM_TX_2] = { AFE_PORT_ID_PRIMARY_TDM_TX_2,
+ PRIMARY_TDM_TX_2, 0, 1},
+ [PRIMARY_TDM_RX_3] = { AFE_PORT_ID_PRIMARY_TDM_RX_3,
+ PRIMARY_TDM_RX_3, 1, 1},
+ [PRIMARY_TDM_TX_3] = { AFE_PORT_ID_PRIMARY_TDM_TX_3,
+ PRIMARY_TDM_TX_3, 0, 1},
+ [PRIMARY_TDM_RX_4] = { AFE_PORT_ID_PRIMARY_TDM_RX_4,
+ PRIMARY_TDM_RX_4, 1, 1},
+ [PRIMARY_TDM_TX_4] = { AFE_PORT_ID_PRIMARY_TDM_TX_4,
+ PRIMARY_TDM_TX_4, 0, 1},
+ [PRIMARY_TDM_RX_5] = { AFE_PORT_ID_PRIMARY_TDM_RX_5,
+ PRIMARY_TDM_RX_5, 1, 1},
+ [PRIMARY_TDM_TX_5] = { AFE_PORT_ID_PRIMARY_TDM_TX_5,
+ PRIMARY_TDM_TX_5, 0, 1},
+ [PRIMARY_TDM_RX_6] = { AFE_PORT_ID_PRIMARY_TDM_RX_6,
+ PRIMARY_TDM_RX_6, 1, 1},
+ [PRIMARY_TDM_TX_6] = { AFE_PORT_ID_PRIMARY_TDM_TX_6,
+ PRIMARY_TDM_TX_6, 0, 1},
+ [PRIMARY_TDM_RX_7] = { AFE_PORT_ID_PRIMARY_TDM_RX_7,
+ PRIMARY_TDM_RX_7, 1, 1},
+ [PRIMARY_TDM_TX_7] = { AFE_PORT_ID_PRIMARY_TDM_TX_7,
+ PRIMARY_TDM_TX_7, 0, 1},
+ [SECONDARY_TDM_RX_0] = { AFE_PORT_ID_SECONDARY_TDM_RX,
+ SECONDARY_TDM_RX_0, 1, 1},
+ [SECONDARY_TDM_TX_0] = { AFE_PORT_ID_SECONDARY_TDM_TX,
+ SECONDARY_TDM_TX_0, 0, 1},
+ [SECONDARY_TDM_RX_1] = { AFE_PORT_ID_SECONDARY_TDM_RX_1,
+ SECONDARY_TDM_RX_1, 1, 1},
+ [SECONDARY_TDM_TX_1] = { AFE_PORT_ID_SECONDARY_TDM_TX_1,
+ SECONDARY_TDM_TX_1, 0, 1},
+ [SECONDARY_TDM_RX_2] = { AFE_PORT_ID_SECONDARY_TDM_RX_2,
+ SECONDARY_TDM_RX_2, 1, 1},
+ [SECONDARY_TDM_TX_2] = { AFE_PORT_ID_SECONDARY_TDM_TX_2,
+ SECONDARY_TDM_TX_2, 0, 1},
+ [SECONDARY_TDM_RX_3] = { AFE_PORT_ID_SECONDARY_TDM_RX_3,
+ SECONDARY_TDM_RX_3, 1, 1},
+ [SECONDARY_TDM_TX_3] = { AFE_PORT_ID_SECONDARY_TDM_TX_3,
+ SECONDARY_TDM_TX_3, 0, 1},
+ [SECONDARY_TDM_RX_4] = { AFE_PORT_ID_SECONDARY_TDM_RX_4,
+ SECONDARY_TDM_RX_4, 1, 1},
+ [SECONDARY_TDM_TX_4] = { AFE_PORT_ID_SECONDARY_TDM_TX_4,
+ SECONDARY_TDM_TX_4, 0, 1},
+ [SECONDARY_TDM_RX_5] = { AFE_PORT_ID_SECONDARY_TDM_RX_5,
+ SECONDARY_TDM_RX_5, 1, 1},
+ [SECONDARY_TDM_TX_5] = { AFE_PORT_ID_SECONDARY_TDM_TX_5,
+ SECONDARY_TDM_TX_5, 0, 1},
+ [SECONDARY_TDM_RX_6] = { AFE_PORT_ID_SECONDARY_TDM_RX_6,
+ SECONDARY_TDM_RX_6, 1, 1},
+ [SECONDARY_TDM_TX_6] = { AFE_PORT_ID_SECONDARY_TDM_TX_6,
+ SECONDARY_TDM_TX_6, 0, 1},
+ [SECONDARY_TDM_RX_7] = { AFE_PORT_ID_SECONDARY_TDM_RX_7,
+ SECONDARY_TDM_RX_7, 1, 1},
+ [SECONDARY_TDM_TX_7] = { AFE_PORT_ID_SECONDARY_TDM_TX_7,
+ SECONDARY_TDM_TX_7, 0, 1},
+ [TERTIARY_TDM_RX_0] = { AFE_PORT_ID_TERTIARY_TDM_RX,
+ TERTIARY_TDM_RX_0, 1, 1},
+ [TERTIARY_TDM_TX_0] = { AFE_PORT_ID_TERTIARY_TDM_TX,
+ TERTIARY_TDM_TX_0, 0, 1},
+ [TERTIARY_TDM_RX_1] = { AFE_PORT_ID_TERTIARY_TDM_RX_1,
+ TERTIARY_TDM_RX_1, 1, 1},
+ [TERTIARY_TDM_TX_1] = { AFE_PORT_ID_TERTIARY_TDM_TX_1,
+ TERTIARY_TDM_TX_1, 0, 1},
+ [TERTIARY_TDM_RX_2] = { AFE_PORT_ID_TERTIARY_TDM_RX_2,
+ TERTIARY_TDM_RX_2, 1, 1},
+ [TERTIARY_TDM_TX_2] = { AFE_PORT_ID_TERTIARY_TDM_TX_2,
+ TERTIARY_TDM_TX_2, 0, 1},
+ [TERTIARY_TDM_RX_3] = { AFE_PORT_ID_TERTIARY_TDM_RX_3,
+ TERTIARY_TDM_RX_3, 1, 1},
+ [TERTIARY_TDM_TX_3] = { AFE_PORT_ID_TERTIARY_TDM_TX_3,
+ TERTIARY_TDM_TX_3, 0, 1},
+ [TERTIARY_TDM_RX_4] = { AFE_PORT_ID_TERTIARY_TDM_RX_4,
+ TERTIARY_TDM_RX_4, 1, 1},
+ [TERTIARY_TDM_TX_4] = { AFE_PORT_ID_TERTIARY_TDM_TX_4,
+ TERTIARY_TDM_TX_4, 0, 1},
+ [TERTIARY_TDM_RX_5] = { AFE_PORT_ID_TERTIARY_TDM_RX_5,
+ TERTIARY_TDM_RX_5, 1, 1},
+ [TERTIARY_TDM_TX_5] = { AFE_PORT_ID_TERTIARY_TDM_TX_5,
+ TERTIARY_TDM_TX_5, 0, 1},
+ [TERTIARY_TDM_RX_6] = { AFE_PORT_ID_TERTIARY_TDM_RX_6,
+ TERTIARY_TDM_RX_6, 1, 1},
+ [TERTIARY_TDM_TX_6] = { AFE_PORT_ID_TERTIARY_TDM_TX_6,
+ TERTIARY_TDM_TX_6, 0, 1},
+ [TERTIARY_TDM_RX_7] = { AFE_PORT_ID_TERTIARY_TDM_RX_7,
+ TERTIARY_TDM_RX_7, 1, 1},
+ [TERTIARY_TDM_TX_7] = { AFE_PORT_ID_TERTIARY_TDM_TX_7,
+ TERTIARY_TDM_TX_7, 0, 1},
+ [QUATERNARY_TDM_RX_0] = { AFE_PORT_ID_QUATERNARY_TDM_RX,
+ QUATERNARY_TDM_RX_0, 1, 1},
+ [QUATERNARY_TDM_TX_0] = { AFE_PORT_ID_QUATERNARY_TDM_TX,
+ QUATERNARY_TDM_TX_0, 0, 1},
+ [QUATERNARY_TDM_RX_1] = { AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+ QUATERNARY_TDM_RX_1, 1, 1},
+ [QUATERNARY_TDM_TX_1] = { AFE_PORT_ID_QUATERNARY_TDM_TX_1,
+ QUATERNARY_TDM_TX_1, 0, 1},
+ [QUATERNARY_TDM_RX_2] = { AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+ QUATERNARY_TDM_RX_2, 1, 1},
+ [QUATERNARY_TDM_TX_2] = { AFE_PORT_ID_QUATERNARY_TDM_TX_2,
+ QUATERNARY_TDM_TX_2, 0, 1},
+ [QUATERNARY_TDM_RX_3] = { AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+ QUATERNARY_TDM_RX_3, 1, 1},
+ [QUATERNARY_TDM_TX_3] = { AFE_PORT_ID_QUATERNARY_TDM_TX_3,
+ QUATERNARY_TDM_TX_3, 0, 1},
+ [QUATERNARY_TDM_RX_4] = { AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+ QUATERNARY_TDM_RX_4, 1, 1},
+ [QUATERNARY_TDM_TX_4] = { AFE_PORT_ID_QUATERNARY_TDM_TX_4,
+ QUATERNARY_TDM_TX_4, 0, 1},
+ [QUATERNARY_TDM_RX_5] = { AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+ QUATERNARY_TDM_RX_5, 1, 1},
+ [QUATERNARY_TDM_TX_5] = { AFE_PORT_ID_QUATERNARY_TDM_TX_5,
+ QUATERNARY_TDM_TX_5, 0, 1},
+ [QUATERNARY_TDM_RX_6] = { AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+ QUATERNARY_TDM_RX_6, 1, 1},
+ [QUATERNARY_TDM_TX_6] = { AFE_PORT_ID_QUATERNARY_TDM_TX_6,
+ QUATERNARY_TDM_TX_6, 0, 1},
+ [QUATERNARY_TDM_RX_7] = { AFE_PORT_ID_QUATERNARY_TDM_RX_7,
+ QUATERNARY_TDM_RX_7, 1, 1},
+ [QUATERNARY_TDM_TX_7] = { AFE_PORT_ID_QUATERNARY_TDM_TX_7,
+ QUATERNARY_TDM_TX_7, 0, 1},
+ [QUINARY_TDM_RX_0] = { AFE_PORT_ID_QUINARY_TDM_RX,
+ QUINARY_TDM_RX_0, 1, 1},
+ [QUINARY_TDM_TX_0] = { AFE_PORT_ID_QUINARY_TDM_TX,
+ QUINARY_TDM_TX_0, 0, 1},
+ [QUINARY_TDM_RX_1] = { AFE_PORT_ID_QUINARY_TDM_RX_1,
+ QUINARY_TDM_RX_1, 1, 1},
+ [QUINARY_TDM_TX_1] = { AFE_PORT_ID_QUINARY_TDM_TX_1,
+ QUINARY_TDM_TX_1, 0, 1},
+ [QUINARY_TDM_RX_2] = { AFE_PORT_ID_QUINARY_TDM_RX_2,
+ QUINARY_TDM_RX_2, 1, 1},
+ [QUINARY_TDM_TX_2] = { AFE_PORT_ID_QUINARY_TDM_TX_2,
+ QUINARY_TDM_TX_2, 0, 1},
+ [QUINARY_TDM_RX_3] = { AFE_PORT_ID_QUINARY_TDM_RX_3,
+ QUINARY_TDM_RX_3, 1, 1},
+ [QUINARY_TDM_TX_3] = { AFE_PORT_ID_QUINARY_TDM_TX_3,
+ QUINARY_TDM_TX_3, 0, 1},
+ [QUINARY_TDM_RX_4] = { AFE_PORT_ID_QUINARY_TDM_RX_4,
+ QUINARY_TDM_RX_4, 1, 1},
+ [QUINARY_TDM_TX_4] = { AFE_PORT_ID_QUINARY_TDM_TX_4,
+ QUINARY_TDM_TX_4, 0, 1},
+ [QUINARY_TDM_RX_5] = { AFE_PORT_ID_QUINARY_TDM_RX_5,
+ QUINARY_TDM_RX_5, 1, 1},
+ [QUINARY_TDM_TX_5] = { AFE_PORT_ID_QUINARY_TDM_TX_5,
+ QUINARY_TDM_TX_5, 0, 1},
+ [QUINARY_TDM_RX_6] = { AFE_PORT_ID_QUINARY_TDM_RX_6,
+ QUINARY_TDM_RX_6, 1, 1},
+ [QUINARY_TDM_TX_6] = { AFE_PORT_ID_QUINARY_TDM_TX_6,
+ QUINARY_TDM_TX_6, 0, 1},
+ [QUINARY_TDM_RX_7] = { AFE_PORT_ID_QUINARY_TDM_RX_7,
+ QUINARY_TDM_RX_7, 1, 1},
+ [QUINARY_TDM_TX_7] = { AFE_PORT_ID_QUINARY_TDM_TX_7,
+ QUINARY_TDM_TX_7, 0, 1},
+};
+
+static void q6afe_port_free(struct kref *ref)
+{
+ struct q6afe_port *port;
+ struct q6afe *afe;
+ unsigned long flags;
+
+ port = container_of(ref, struct q6afe_port, refcount);
+ afe = port->afe;
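+ /* Unlink the port from the AFE port list under the lock before freeing it. */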
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_del(&port->node);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ kfree(port->scfg);
+ kfree(port);
+}
+
+static struct q6afe_port *q6afe_find_port(struct q6afe *afe, int token)
+{
+ struct q6afe_port *p = NULL;
+ struct q6afe_port *ret = NULL;
+ unsigned long flags;
+
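+ /* Look up the port by token; take a reference under the lock so it cannot be freed under us. */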
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_for_each_entry(p, &afe->port_list, node)
+ if (p->token == token) {
+ ret = p;
+ kref_get(&p->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ return ret;
+}
+
+static int q6afe_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *res;
+ struct apr_hdr *hdr = &data->hdr;
+>>>>>>>
+ struct q6afe_port *port;
+
+ if (!data->payload_size)
+ return 0;
+
+ res = data->payload;
+<<<<<<<
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ if (res->status) {
+ afe->status = res->status;
+ dev_err(afe->dev, "cmd = 0x%x returned error = 0x%x\n",
+ res->opcode, res->status);
+ }
+
+ switch (res->opcode) {
+ case AFE_PORT_CMD_START:
+ case AFE_PORT_CMD_STOP:
+ case AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG:
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ afe->state = AFE_CMD_RESP_AVAIL;
+ port = afe_find_port(afe, data->token);
+ if (port)
+ wake_up(&port->wait);
+
+=======
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT: {
+ if (res->status) {
+ dev_err(afe->dev, "cmd = 0x%x returned error = 0x%x\n",
+ res->opcode, res->status);
+ }
+ switch (res->opcode) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ case AFE_SVC_CMD_SET_PARAM:
+ port = q6afe_find_port(afe, hdr->token);
+ if (port) {
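+ /* Record the DSP result, wake the waiter and drop the reference taken in q6afe_find_port(). */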
+ port->result = *res;
+ wake_up(&port->wait);
+ kref_put(&port->refcount, q6afe_port_free);
+ }
+>>>>>>>
+ break;
+ default:
+ dev_err(afe->dev, "Unknown cmd 0x%x\n", res->opcode);
+ break;
+ }
+ }
+<<<<<<<
+=======
+ break;
+ default:
+ break;
+ }
+>>>>>>>
+
+ return 0;
+}
+
+/**
+ * q6afe_get_port_id() - Get port id from a given port index
+ *
+ * @index: port index
+ *
+ * Return: Will be a negative value on error or a valid port_id on success
+ */
+int q6afe_get_port_id(int index)
+{
+ if (index < 0 || index > AFE_PORT_MAX)
+ return -EINVAL;
+
+ return port_maps[index].port_id;
+}
+EXPORT_SYMBOL_GPL(q6afe_get_port_id);
+
+<<<<<<<
+static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
+ struct q6afe_port *port)
+{
+ wait_queue_head_t *wait = &port->wait;
+ struct apr_hdr *hdr = &pkt->hdr;
+ int ret;
+
+ mutex_lock(&afe->lock);
+ port->result.opcode = 0;
+ port->result.status = 0;
+
+ ret = apr_send_pkt(afe->apr, pkt);
+ if (ret < 0) {
+ dev_err(afe->dev, "packet not transmitted (%d)\n", ret);
+=======
+static int afe_apr_send_pkt(struct q6afe *afe, void *data,
+ wait_queue_head_t *wait)
+{
+ int ret;
+
+ mutex_lock(&afe->lock);
+ afe->status = 0;
+ afe->state = AFE_CMD_RESP_NONE;
+
+ ret = apr_send_pkt(afe->apr, data);
+ if (ret < 0) {
+ dev_err(afe->dev, "packet not transmitted\n");
+>>>>>>>
+ ret = -EINVAL;
+ goto err;
+ }
+
+<<<<<<<
+ ret = wait_event_timeout(*wait, (afe->state == AFE_CMD_RESP_AVAIL),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (afe->status > 0) {
+ dev_err(afe->dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(afe->status));
+ ret = q6dsp_errno(afe->status);
+=======
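+ /* Wait until the callback posts a response whose opcode matches this command. */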
+ ret = wait_event_timeout(*wait, (port->result.opcode == hdr->opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (port->result.status > 0) {
+ dev_err(afe->dev, "DSP returned error[%x]\n",
+ port->result.status);
+ ret = -EINVAL;
+>>>>>>>
+ } else {
+ ret = 0;
+ }
+
+err:
+ mutex_unlock(&afe->lock);
+
+ return ret;
+}
+
+<<<<<<<
+static int afe_send_cmd_port_start(struct q6afe_port *port)
+{
+ u16 port_id = port->id;
+ struct afe_port_cmd_device_start start;
+ struct q6afe *afe = port->afe;
+ int ret;
+
+ start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ start.hdr.pkt_size = sizeof(start);
+ start.hdr.src_port = 0;
+ start.hdr.dest_port = 0;
+ start.hdr.token = port->token;
+ start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+ start.port_id = port_id;
+
+ ret = afe_apr_send_pkt(afe, &start, &port->wait);
+=======
+static int q6afe_port_set_param(struct q6afe_port *port, void *data,
+ int param_id, int module_id, int psize)
+{
+ struct afe_svc_cmd_set_param *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+>>>>>>>
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+<<<<<<<
+=======
+ kfree(pkt);
+>>>>>>>
+ return ret;
+}
+
+static int q6afe_port_set_param_v2(struct q6afe_port *port, void *data,
+<<<<<<<
+ int param_id, int module_id, int psize)
+{
+ struct afe_port_cmd_set_param_v2 *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+=======
+ int param_id, int psize)
+{
+ struct apr_hdr *hdr;
+ struct afe_port_cmd_set_param_v2 *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ u16 port_id = port->id;
+ int ret;
+
+ hdr = data;
+ param = data + sizeof(*hdr);
+ pdata = data + sizeof(*hdr) + sizeof(*param);
+
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(*hdr) + sizeof(*param) +
+ sizeof(*pdata) + psize;
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port->token;
+ hdr->opcode = AFE_PORT_CMD_SET_PARAM_V2;
+>>>>>>>
+ param->port_id = port_id;
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+<<<<<<<
+ pdata->module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, data, &port->wait);
+=======
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+>>>>>>>
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+<<<<<<<
+ kfree(pkt);
+=======
+
+>>>>>>>
+ return ret;
+}
+
+static int q6afe_set_lpass_clock(struct q6afe_port *port,
+ struct afe_clk_cfg *cfg)
+{
+<<<<<<<
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_LPAIF_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+}
+
+static int q6afe_set_lpass_clock_v2(struct q6afe_port *port,
+ struct afe_clk_set *cfg)
+{
+ return q6afe_port_set_param(port, cfg, AFE_PARAM_ID_CLOCK_SET,
+ AFE_MODULE_CLOCK_SET, sizeof(*cfg));
+=======
+ struct afe_lpass_clk_config_command clk_cfg = {0};
+ int param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG;
+ struct q6afe *afe = port->afe;
+
+ if (!cfg) {
+ dev_err(afe->dev, "clock cfg is NULL\n");
+ return -EINVAL;
+ }
+
+ clk_cfg.clk_cfg = *cfg;
+
+ return q6afe_port_set_param_v2(port, &clk_cfg, param_id, sizeof(*cfg));
+>>>>>>>
+}
+
+static int q6afe_set_digital_codec_core_clock(struct q6afe_port *port,
+ struct afe_digital_clk_cfg *cfg)
+{
+<<<<<<<
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+=======
+ struct afe_lpass_digital_clk_config_command clk_cfg = {0};
+ int param_id = AFE_PARAM_ID_INTERNAL_DIGITAL_CDC_CLK_CONFIG;
+ struct q6afe *afe = port->afe;
+
+ if (!cfg) {
+ dev_err(afe->dev, "clock cfg is NULL\n");
+ return -EINVAL;
+ }
+
+ clk_cfg.clk_cfg = *cfg;
+
+ return q6afe_port_set_param_v2(port, &clk_cfg, param_id, sizeof(*cfg));
+>>>>>>>
+}
+
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir)
+{
+ struct afe_clk_cfg ccfg = {0,};
+<<<<<<<
+=======
+ struct afe_clk_set cset = {0,};
+>>>>>>>
+ struct afe_digital_clk_cfg dcfg = {0,};
+ int ret;
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ dcfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ dcfg.clk_val = freq;
+ dcfg.clk_root = clk_root;
+ ret = q6afe_set_digital_codec_core_clock(port, &dcfg);
+ break;
+ case LPAIF_BIT_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val1 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK1_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+
+ case LPAIF_OSR_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val2 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK2_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+<<<<<<<
+=======
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
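+ /* These clock IDs go through the v2 clock-set interface (AFE_MODULE_CLOCK_SET). */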
+ cset.clk_set_minor_version = AFE_API_VERSION_CLOCK_SET;
+ cset.clk_id = clk_id;
+ cset.clk_freq_in_hz = freq;
+ cset.clk_attri = clk_src;
+ cset.clk_root = clk_root;
+ cset.enable = !!freq;
+ ret = q6afe_set_lpass_clock_v2(port, &cset);
+ break;
+>>>>>>>
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_set_sysclk);
+
+<<<<<<<
+/**
+ * q6afe_port_stop() - Stop an afe port
+ *
+ * @port: Instance of port to stop
+ *
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_port_stop(struct q6afe_port *port)
+{
+ struct afe_port_cmd_device_stop *stop;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ int port_id = port->id;
+ int ret = 0;
+ int index, pkt_size;
+ void *p;
+=======
+static int q6afev2_port_start(struct q6afe_port *port)
+{
+ union afe_port_config *afe_config = &port->port_cfg;
+ struct afe_audioif_config_command config = {0,};
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ int ret, param_id = port->cfg_type;
+
+ config.port = *afe_config;
+
+ ret = q6afe_port_set_param_v2(port, &config, param_id,
+ sizeof(*afe_config));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+ return afe_send_cmd_port_start(port);
+}
+
+static int q6afev1_port_start(struct q6afe_port *port)
+{
+ union afe_port_config_v1 *afe_config = &port->port_cfgv1;
+ struct afe_port_start_command_v1 start;
+ struct afe_audioif_config_command_v1 config = {0,};
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ struct apr_hdr *hdr;
+ int ret;
+
+ hdr = &config.hdr;
+
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(config);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port_id;
+ hdr->opcode = port->cfg_type;
+ config.port_id = port_id;
+ config.port = *afe_config;
+
+ ret = afe_apr_send_pkt(afe, &config, &port->wait);
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+
+ hdr = &start.hdr;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(start);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port_id;
+ hdr->opcode = AFE_PORT_CMD_START;
+ start.port_id = port_id;
+ start.gain = 0x2000;
+ start.sample_rate = port->rate;
+
+ return afe_apr_send_pkt(afe, &start, &port->wait);
+}
+
+static int q6afev1_port_stop(struct q6afe_port *port)
+{
+ int port_id = port->id;
+ struct afe_port_stop_command_v1 stop;
+ struct q6afe *afe = port->afe;
+ int ret = 0;
+ int index = 0;
+>>>>>>>
+
+ port_id = port->id;
+ index = port->token;
+ if (index < 0 || index > AFE_PORT_MAX) {
+ dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ pkt_size = APR_HDR_SIZE + sizeof(*stop);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ stop = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = index;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop->port_id = port_id;
+ stop->reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_stop);
+
+/**
+=======
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = port_id;
+ stop.hdr.opcode = AFE_PORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, &stop, &port->wait);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ return ret;
+}
+
+static int q6afev2_port_stop(struct q6afe_port *port)
+{
+ int port_id = port->id;
+ struct afe_port_cmd_device_stop stop;
+ struct q6afe *afe = port->afe;
+ int ret = 0;
+ int index = 0;
+
+ port_id = port->id;
+ index = port->token;
+ if (index < 0 || index > AFE_PORT_MAX) {
+ dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
+ return -EINVAL;
+ }
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = index;
+ stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, &stop, &port->wait);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ return ret;
+}
+/**
+ * q6afe_port_stop() - Stop an afe port
+ *
+ * @port: Instance of port to stop
+ *
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_port_stop(struct q6afe_port *port)
+{
+ return port->afe->ops->port_stop(port);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_stop);
+
+/**
+ * q6afe_set_dai_data() - set dai private data
+ *
+ * @dev: Pointer to afe device.
+ * @data: dai private data
+ *
+ */
+void q6afe_set_dai_data(struct device *dev, void *data)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ afe->dai_data = data;
+}
+EXPORT_SYMBOL_GPL(q6afe_set_dai_data);
+
+/**
+ * q6afe_get_dai_data() - get dai private data
+ *
+ * @dev: Pointer to afe device.
+ *
+ * Return: pointer to dai private data
+ */
+void *q6afe_get_dai_data(struct device *dev)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ return afe->dai_data;
+}
+EXPORT_SYMBOL_GPL(q6afe_get_dai_data);
+
+/**
+>>>>>>>
+ * q6afe_slim_port_prepare() - Prepare slim afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: SLIM configuration for the afe port
+ *
+ */
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->slim_cfg.sb_cfg_minor_version = AFE_API_VERSION_SLIMBUS_CONFIG;
+ pcfg->slim_cfg.sample_rate = cfg->sample_rate;
+ pcfg->slim_cfg.bit_width = cfg->bit_width;
+ pcfg->slim_cfg.num_channels = cfg->num_channels;
+ pcfg->slim_cfg.data_format = cfg->data_format;
+ pcfg->slim_cfg.shared_ch_mapping[0] = cfg->ch_mapping[0];
+ pcfg->slim_cfg.shared_ch_mapping[1] = cfg->ch_mapping[1];
+ pcfg->slim_cfg.shared_ch_mapping[2] = cfg->ch_mapping[2];
+ pcfg->slim_cfg.shared_ch_mapping[3] = cfg->ch_mapping[3];
+
+}
+EXPORT_SYMBOL_GPL(q6afe_slim_port_prepare);
+
+<<<<<<<
+/**
+ * q6afe_tdm_port_prepare() - Prepare tdm afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: TDM configuration for the afe port
+ *
+ */
+void q6afe_tdm_port_prepare(struct q6afe_port *port,
+ struct q6afe_tdm_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->tdm_cfg.tdm_cfg_minor_version = AFE_API_VERSION_TDM_CONFIG;
+ pcfg->tdm_cfg.num_channels = cfg->num_channels;
+ pcfg->tdm_cfg.sample_rate = cfg->sample_rate;
+ pcfg->tdm_cfg.bit_width = cfg->bit_width;
+ pcfg->tdm_cfg.data_format = cfg->data_format;
+ pcfg->tdm_cfg.sync_mode = cfg->sync_mode;
+ pcfg->tdm_cfg.sync_src = cfg->sync_src;
+ pcfg->tdm_cfg.nslots_per_frame = cfg->nslots_per_frame;
+
+ pcfg->tdm_cfg.slot_width = cfg->slot_width;
+ pcfg->tdm_cfg.slot_mask = cfg->slot_mask;
+ port->scfg = kzalloc(sizeof(*port->scfg), GFP_KERNEL);
+ if (!port->scfg)
+ return;
+
+ port->scfg->minor_version = AFE_API_VERSION_SLOT_MAPPING_CONFIG;
+ port->scfg->num_channels = cfg->num_channels;
+ port->scfg->bitwidth = cfg->bit_width;
+ port->scfg->data_align_type = cfg->data_align_type;
+ memcpy(port->scfg->ch_mapping, cfg->ch_mapping,
+ sizeof(u16) * AFE_PORT_MAX_AUDIO_CHAN_CNT);
+}
+EXPORT_SYMBOL_GPL(q6afe_tdm_port_prepare);
+=======
+void q6afev1_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+ union afe_port_config_v1 *pcfg = &port->port_cfgv1;
+
+ pcfg->hdmi_multi_ch.data_type = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.reserved = 0;
+ port->rate = cfg->sample_rate;
+ //FIXME RATE
+// pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+// pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+}
+
+void q6afev2_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->hdmi_multi_ch.hdmi_cfg_minor_version =
+ AFE_API_VERSION_HDMI_CONFIG;
+ pcfg->hdmi_multi_ch.datatype = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+ pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+}
+>>>>>>>
+
+/**
+ * q6afe_hdmi_port_prepare() - Prepare hdmi afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: HDMI configuration for the afe port
+ *
+ */
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+<<<<<<<
+ port->afe->ops->hdmi_prepare(port, cfg);
+=======
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->hdmi_multi_ch.hdmi_cfg_minor_version =
+ AFE_API_VERSION_HDMI_CONFIG;
+ pcfg->hdmi_multi_ch.datatype = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+ pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6afe_hdmi_port_prepare);
+
+/**
+ * q6afe_i2s_port_prepare() - Prepare i2s afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: I2S configuration for the afe port
+<<<<<<<
+ *
+ */
+void q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+=======
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+ struct device *dev = port->afe->dev;
+ int num_sd_lines;
+>>>>>>>
+
+ pcfg->i2s_cfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ pcfg->i2s_cfg.sample_rate = cfg->sample_rate;
+ pcfg->i2s_cfg.bit_width = cfg->bit_width;
+ pcfg->i2s_cfg.data_format = AFE_LINEAR_PCM_DATA;
+
+ switch (cfg->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* CPU is slave */
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL;
+ break;
+ default:
+ break;
+ }
+
+<<<<<<<
+ num_sd_lines = hweight_long(cfg->sd_line_mask);
+
+ switch (num_sd_lines) {
+ case 0:
+ dev_err(dev, "no line is assigned\n");
+ return -EINVAL;
+ case 1:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_SD1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD1;
+ break;
+ case AFE_PORT_I2S_SD2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ case AFE_PORT_I2S_SD3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD3;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD01;
+ break;
+ case AFE_PORT_I2S_SD2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD23;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 3:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_6CHS;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_8CHS;
+
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+
+ switch (cfg->num_channels) {
+ case 1:
+ case 2:
+ switch (pcfg->i2s_cfg.channel_mode) {
+ case AFE_PORT_I2S_QUAD01:
+ case AFE_PORT_I2S_6CHS:
+ case AFE_PORT_I2S_8CHS:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_QUAD23:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ }
+
+ if (cfg->num_channels == 2)
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_STEREO;
+ else
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_MONO;
+
+ break;
+ case 3:
+ case 4:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_QUAD01) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 5:
+ case 6:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_6CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 7:
+ case 8:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_8CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+=======
+ switch (cfg->num_channels) {
+ case 1:
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_MONO;
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case 2:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_STEREO;
+ break;
+ case 3:
+ case 4:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD01;
+ break;
+ case 5:
+ case 6:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_6CHS;
+ break;
+ case 7:
+ case 8:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_8CHS;
+ break;
+ default:
+ break;
+ }
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6afe_i2s_port_prepare);
+
+/**
+ * q6afe_port_start() - Start an afe port
+ *
+ * @port: Instance of port to start
+ *
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_port_start(struct q6afe_port *port)
+{
+<<<<<<<
+ return port->afe->ops->port_start(port);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_start);
+
+struct q6afe_port *q6afev1_port_get_from_id(struct device *dev, int port_id)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+ struct q6afe_port *port;
+ int cfg_type;
+
+ if (port_id < 0 || port_id > AFE_PORT_MAX) {
+ dev_err(dev, "AFE port token[%d] invalid!\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ switch (port_id) {
+ case AFE_PORT_HDMI_RX:
+ cfg_type = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
+ break;
+ default:
+ dev_err(dev, "Invalid port id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&port->wait);
+
+ port->token = port_id;
+ port->id = port_id;
+ port->afe = afe;
+ port->cfg_type = cfg_type;
+
+ spin_lock(&afe->port_list_lock);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock(&afe->port_list_lock);
+
+ return port;
+
+}
+
+struct q6afe_port *q6afev2_port_get_from_id(struct device *dev, int id)
+{
+ int port_id;
+ struct q6afe *afe = dev_get_drvdata(dev);
+ struct q6afe_port *port;
+=======
+ struct afe_port_cmd_device_start *start;
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ int ret, param_id = port->cfg_type;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ void *p;
+
+ ret = q6afe_port_set_param_v2(port, &port->port_cfg, param_id,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(port->port_cfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+
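+ /* TDM ports carry an additional slot-mapping configuration; send it before starting the port. */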
+ if (port->scfg) {
+ ret = q6afe_port_set_param_v2(port, port->scfg,
+ AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG,
+ AFE_MODULE_TDM, sizeof(*port->scfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+ }
+
+ pkt_size = APR_HDR_SIZE + sizeof(*start);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ start = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+
+ start->port_id = port_id;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_start);
+
+/**
+ * q6afe_port_get_from_id() - Get port instance from a port id
+ *
+ * @dev: Pointer to afe child device.
+ * @id: port id
+ *
+ * Return: Will be an error pointer on error or a valid afe port
+ * on success.
+ */
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
+{
+ int port_id;
+ struct q6afe *afe = dev_get_drvdata(dev->parent);
+ struct q6afe_port *port;
+ unsigned long flags;
+>>>>>>>
+ int cfg_type;
+
+ if (id < 0 || id > AFE_PORT_MAX) {
+ dev_err(dev, "AFE port token[%d] invalid!\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
+<<<<<<<
+=======
+ /* Handle the port being bound/unbound multiple times before the callback finishes: reuse the existing instance. */
+ port = q6afe_find_port(afe, id);
+ if (port) {
+ dev_err(dev, "AFE Port already open\n");
+ return port;
+ }
+
+>>>>>>>
+ port_id = port_maps[id].port_id;
+
+ switch (port_id) {
+ case AFE_PORT_ID_MULTICHAN_HDMI_RX:
+ cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+ break;
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX:
+ cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+ break;
+
+ case AFE_PORT_ID_PRIMARY_MI2S_RX:
+ case AFE_PORT_ID_PRIMARY_MI2S_TX:
+ case AFE_PORT_ID_SECONDARY_MI2S_RX:
+ case AFE_PORT_ID_SECONDARY_MI2S_TX:
+ case AFE_PORT_ID_TERTIARY_MI2S_RX:
+ case AFE_PORT_ID_TERTIARY_MI2S_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+ break;
+<<<<<<<
+=======
+ case AFE_PORT_ID_PRIMARY_TDM_RX ... AFE_PORT_ID_QUINARY_TDM_TX_7:
+ cfg_type = AFE_PARAM_ID_TDM_CONFIG;
+ break;
+
+>>>>>>>
+ default:
+ dev_err(dev, "Invalid port id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+<<<<<<<
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+=======
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+>>>>>>>
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&port->wait);
+
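+ /* The token is the port index; the callback uses it to locate this port. */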
+ port->token = id;
+ port->id = port_id;
+ port->afe = afe;
+ port->cfg_type = cfg_type;
+<<<<<<<
+ kref_init(&port->refcount);
+
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+=======
+
+ spin_lock(&afe->port_list_lock);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock(&afe->port_list_lock);
+>>>>>>>
+
+ return port;
+
+}
+<<<<<<<
+=======
+/**
+ * q6afe_port_get_from_id() - Get port instance from a port id
+ *
+ * @dev: Pointer to afe child device.
+ * @id: port id
+ *
+ * Return: Will be an error pointer on error or a valid afe port
+ * on success.
+ */
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ return afe->ops->port_get_from_id(dev, id);
+}
+
+>>>>>>>
+EXPORT_SYMBOL_GPL(q6afe_port_get_from_id);
+
+/**
+ * q6afe_port_put() - Release port reference
+ *
+ * @port: Instance of port to put
+ */
+void q6afe_port_put(struct q6afe_port *port)
+{
+<<<<<<<
+ kref_put(&port->refcount, q6afe_port_free);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_put);
+
+static int q6afe_probe(struct apr_device *adev)
+{
+ struct q6afe *afe;
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+=======
+ struct q6afe *afe = port->afe;
+
+ spin_lock(&afe->port_list_lock);
+ list_del(&port->node);
+ spin_unlock(&afe->port_list_lock);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_put);
+
+struct q6afe_ops q6afev1_ops = {
+ .port_stop = q6afev1_port_stop,
+ .port_start = q6afev1_port_start,
+ .hdmi_prepare = q6afev1_hdmi_port_prepare,
+ .port_get_from_id = q6afev1_port_get_from_id,
+};
+
+struct q6afe_ops q6afev2_ops = {
+ .port_stop = q6afev2_port_stop,
+ .port_start = q6afev2_port_start,
+ .hdmi_prepare = q6afev2_hdmi_port_prepare,
+ .port_get_from_id = q6afev2_port_get_from_id,
+};
+
+static int q6afev2_probe(struct apr_device *adev)
+{
+ struct q6afe *afe;
+ struct device *dev = &adev->dev;
+>>>>>>>
+
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+
+<<<<<<<
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x20)
+ afe->ops = &q6afev2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ afe->ops = &q6afev1_ops;
+
+ if (!afe->ops) {
+ dev_err(&adev->dev, "Unsupported AFE version\n");
+ return -EINVAL;
+ }
+
+=======
+ q6core_get_svc_api_info(adev->svc_id, &afe->ainfo);
+>>>>>>>
+ afe->apr = adev;
+ mutex_init(&afe->lock);
+ afe->dev = dev;
+ INIT_LIST_HEAD(&afe->port_list);
+ spin_lock_init(&afe->port_list_lock);
+
+ dev_set_drvdata(dev, afe);
+
+<<<<<<<
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ afe->pdev_dais = of_platform_device_create(dais_np,
+ "q6afe-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6afe_remove(struct apr_device *adev)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+
+ if (afe->pdev_dais)
+ of_platform_device_destroy(&afe->pdev_dais->dev, NULL);
+
+ return 0;
+=======
+ return q6afe_dai_dev_probe(dev);
+}
+
+static int q6afev2_remove(struct apr_device *adev)
+{
+ return q6afe_dai_dev_remove(&adev->dev);
+>>>>>>>
+}
+
+static const struct of_device_id q6afe_device_id[] = {
+ { .compatible = "qcom,q6afe" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6afe_device_id);
+
+static struct apr_driver qcom_q6afe_driver = {
+<<<<<<<
+ .probe = q6afe_probe,
+ .remove = q6afe_remove,
+ .callback = q6afe_callback,
+=======
+ .probe = q6afev2_probe,
+ .remove = q6afev2_remove,
+ .callback = afe_callback,
+>>>>>>>
+ .driver = {
+ .name = "qcom-q6afe",
+ .of_match_table = of_match_ptr(q6afe_device_id),
+
+ },
+};
+
+module_apr_driver(qcom_q6afe_driver);
+MODULE_DESCRIPTION("Q6 Audio Front End");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/thisimage b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/thisimage
new file mode 100644
index 0000000..3ce8b16
--- /dev/null
+++ b/rr-cache/eeed3d17605fdb06a241cda9eed6d09d601101b6/thisimage
@@ -0,0 +1,2236 @@
+// SPDX-License-Identifier: GPL-2.0
+<<<<<<<
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+=======
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+>>>>>>>
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+<<<<<<<
+#include <linux/kref.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+=======
+#include <linux/of.h>
+>>>>>>>
+#include <linux/delay.h>
+#include <linux/soc/qcom/apr.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "q6dsp-errno.h"
+#include "q6core.h"
+#include "q6afe.h"
+
+/* AFE CMDs */
+#define AFE_PORT_CMD_DEVICE_START 0x000100E5
+#define AFE_PORT_CMD_DEVICE_STOP 0x000100E6
+#define AFE_PORT_CMD_SET_PARAM_V2 0x000100EF
+<<<<<<<
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+#define AFE_PARAM_ID_HDMI_CONFIG 0x00010210
+#define AFE_MODULE_AUDIO_DEV_INTERFACE 0x0001020C
+=======
+#define AFE_SVC_CMD_SET_PARAM 0x000100f3
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+#define AFE_PARAM_ID_HDMI_CONFIG 0x00010210
+#define AFE_MODULE_AUDIO_DEV_INTERFACE 0x0001020C
+#define AFE_MODULE_TDM 0x0001028A
+>>>>>>>
+
+#define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG 0x00010235
+
+#define AFE_PARAM_ID_LPAIF_CLK_CONFIG 0x00010238
+<<<<<<<
+#define AFE_PARAM_ID_INTERNAL_DIGITAL_CDC_CLK_CONFIG 0x00010239
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG 0x00010212
+#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
+=======
+#define AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG 0x00010239
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG 0x00010212
+#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
+#define AFE_PARAM_ID_TDM_CONFIG 0x0001029D
+#define AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG 0x00010297
+>>>>>>>
+
+/* I2S config specific */
+#define AFE_API_VERSION_I2S_CONFIG 0x1
+#define AFE_PORT_I2S_SD0 0x1
+#define AFE_PORT_I2S_SD1 0x2
+#define AFE_PORT_I2S_SD2 0x3
+#define AFE_PORT_I2S_SD3 0x4
+<<<<<<<
+=======
+#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
+#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
+#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
+#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
+#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
+#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
+#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
+>>>>>>>
+#define AFE_PORT_I2S_QUAD01 0x5
+#define AFE_PORT_I2S_QUAD23 0x6
+#define AFE_PORT_I2S_6CHS 0x7
+#define AFE_PORT_I2S_8CHS 0x8
+#define AFE_PORT_I2S_MONO 0x0
+#define AFE_PORT_I2S_STEREO 0x1
+#define AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL 0x0
+#define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL 0x1
+#define AFE_LINEAR_PCM_DATA 0x0
+
+
+/* Port IDs */
+#define AFE_API_VERSION_HDMI_CONFIG 0x1
+#define AFE_PORT_ID_MULTICHAN_HDMI_RX 0x100E
+
+#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
+<<<<<<<
+=======
+/* Clock set API version */
+#define AFE_API_VERSION_CLOCK_SET 1
+#define Q6AFE_LPASS_CLK_CONFIG_API_VERSION 0x1
+#define AFE_MODULE_CLOCK_SET 0x0001028F
+#define AFE_PARAM_ID_CLOCK_SET 0x00010290
+>>>>>>>
+
+/* SLIMbus Rx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX 0x4000
+/* SLIMbus Tx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX 0x4001
+/* SLIMbus Rx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX 0x4002
+/* SLIMbus Tx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX 0x4003
+/* SLIMbus Rx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX 0x4004
+/* SLIMbus Tx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX 0x4005
+/* SLIMbus Rx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX 0x4006
+/* SLIMbus Tx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX 0x4007
+/* SLIMbus Rx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX 0x4008
+/* SLIMbus Tx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX 0x4009
+/* SLIMbus Rx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX 0x400a
+/* SLIMbus Tx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX 0x400b
+/* SLIMbus Rx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX 0x400c
+/* SLIMbus Tx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX 0x400d
+#define AFE_PORT_ID_PRIMARY_MI2S_RX 0x1000
+#define AFE_PORT_ID_PRIMARY_MI2S_TX 0x1001
+#define AFE_PORT_ID_SECONDARY_MI2S_RX 0x1002
+#define AFE_PORT_ID_SECONDARY_MI2S_TX 0x1003
+#define AFE_PORT_ID_TERTIARY_MI2S_RX 0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX 0x1005
+#define AFE_PORT_ID_QUATERNARY_MI2S_RX 0x1006
+#define AFE_PORT_ID_QUATERNARY_MI2S_TX 0x1007
+
+<<<<<<<
+=======
+/* Start of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_START 0x9000
+
+/* End of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_END \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START+0x50-1)
+
+/* Size of the range of port IDs for TDM ports. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_SIZE \
+ (AFE_PORT_ID_TDM_PORT_RANGE_END - \
+ AFE_PORT_ID_TDM_PORT_RANGE_START+1)
+
+#define AFE_PORT_ID_PRIMARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x00)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_PRIMARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x01)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_1 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_2 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_3 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_4 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_5 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_6 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_7 \
+ (AFE_PORT_ID_PRIMARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x10)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x11)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_1 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_2 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_3 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_4 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_5 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_6 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_7 \
+ (AFE_PORT_ID_SECONDARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x20)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x21)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_1 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_2 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_3 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_4 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_5 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_6 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_7 \
+ (AFE_PORT_ID_TERTIARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x30)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x31)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_RX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x40)
+#define AFE_PORT_ID_QUINARY_TDM_RX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_RX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_RX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_RX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_RX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_RX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_RX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUINARY_TDM_TX \
+ (AFE_PORT_ID_TDM_PORT_RANGE_START + 0x41)
+#define AFE_PORT_ID_QUINARY_TDM_TX_1 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUINARY_TDM_TX_2 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUINARY_TDM_TX_3 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUINARY_TDM_TX_4 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUINARY_TDM_TX_5 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUINARY_TDM_TX_6 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUINARY_TDM_TX_7 \
+ (AFE_PORT_ID_QUINARY_TDM_TX + 0x0E)
+
+>>>>>>>
+#define Q6AFE_LPASS_MODE_CLK1_VALID 1
+#define Q6AFE_LPASS_MODE_CLK2_VALID 2
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+<<<<<<<
+=======
+#define AFE_API_VERSION_TDM_CONFIG 1
+#define AFE_API_VERSION_SLOT_MAPPING_CONFIG 1
+>>>>>>>
+
+#define TIMEOUT_MS 1000
+#define AFE_CMD_RESP_AVAIL 0
+#define AFE_CMD_RESP_NONE 1
+
+<<<<<<<
+/* v1 Specific */
+#define AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG 0x000100D9
+#define AFE_PORT_CMD_START 0x000100ca
+#define AFE_PORT_CMD_STOP 0x000100cb
+
+struct q6afe_ops {
+ int (*port_stop)(struct q6afe_port *port);
+ int (*port_start)(struct q6afe_port *port);
+ struct q6afe_port * (*port_get_from_id)(struct device *dev, int id);
+ void (*hdmi_prepare) (struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+};
+
+struct q6afe {
+ struct apr_device *apr;
+ struct device *dev;
+ int state;
+ int status;
+
+ struct mutex lock;
+ struct list_head port_list;
+ spinlock_t port_list_lock;
+ struct list_head node;
+ void *dai_data;
+ struct q6afe_ops *ops;
+};
+
+struct afe_port_start_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 gain; /* Q13 */
+ u32 sample_rate; /* 8, 16, 48 kHz */
+} __packed;
+
+
+struct afe_port_cmd_device_start {
+ struct apr_hdr hdr;
+=======
+struct q6afe {
+ struct apr_device *apr;
+ struct device *dev;
+ struct q6core_svc_api_info ainfo;
+ struct mutex lock;
+ struct list_head port_list;
+ spinlock_t port_list_lock;
+ struct platform_device *pdev_dais;
+};
+
+struct afe_port_cmd_device_start {
+>>>>>>>
+ u16 port_id;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+struct afe_port_cmd_device_stop {
+=======
+struct afe_port_stop_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+struct afe_port_cmd_device_stop {
+ struct apr_hdr hdr;
+>>>>>>>
+ u16 port_id;
+ u16 reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0. */
+} __packed;
+
+struct afe_port_param_data_v2 {
+ u32 module_id;
+ u32 param_id;
+ u16 param_size;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+=======
+struct afe_svc_cmd_set_param {
+ uint32_t payload_size;
+ uint32_t payload_address_lsw;
+ uint32_t payload_address_msw;
+ uint32_t mem_map_handle;
+} __packed;
+
+>>>>>>>
+struct afe_port_cmd_set_param_v2 {
+ u16 port_id;
+ u16 payload_size;
+ u32 payload_address_lsw;
+ u32 payload_address_msw;
+ u32 mem_map_handle;
+} __packed;
+
+struct afe_param_id_hdmi_multi_chan_audio_cfg {
+ u32 hdmi_cfg_minor_version;
+ u16 datatype;
+ u16 channel_allocation;
+ u32 sample_rate;
+ u16 bit_width;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_slimbus_cfg {
+ u32 sb_cfg_minor_version;
+/* Minor version used for tracking the version of the SLIMBUS
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SLIMBUS_CONFIG
+ */
+
+ u16 slimbus_dev_id;
+/* SLIMbus hardware device ID, which is required to handle
+ * multiple SLIMbus hardware blocks.
+ * Supported values: - #AFE_SLIMBUS_DEVICE_1 - #AFE_SLIMBUS_DEVICE_2
+ */
+ u16 bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+ u16 data_format;
+/* Data format supported by the SLIMbus hardware. The default is
+ * 0 (#AFE_SB_DATA_FORMAT_NOT_INDICATED), which indicates the
+ * hardware does not perform any format conversions before the data
+ * transfer.
+ */
+ u16 num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+ u8 shared_ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+/* Mapping of shared channel IDs (128 to 255) to which the
+ * master port is to be connected.
+ * Shared_channel_mapping[i] represents the shared channel assigned
+ * for audio channel i in multichannel audio data.
+ */
+ u32 sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+} __packed;
+
+struct afe_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val1;
+ u32 clk_val2;
+ u16 clk_src;
+ u16 clk_root;
+ u16 clk_set_mode;
+ u16 reserved;
+} __packed;
+
+struct afe_digital_clk_cfg {
+ u32 i2s_cfg_minor_version;
+ u32 clk_val;
+ u16 clk_root;
+ u16 reserved;
+} __packed;
+
+struct afe_param_id_i2s_cfg {
+ u32 i2s_cfg_minor_version;
+ u16 bit_width;
+ u16 channel_mode;
+ u16 mono_stereo;
+ u16 ws_src;
+ u32 sample_rate;
+ u16 data_format;
+ u16 reserved;
+} __packed;
+
+<<<<<<<
+struct afe_param_id_tdm_cfg {
+ u32 tdm_cfg_minor_version;
+ u32 num_channels;
+ u32 sample_rate;
+ u32 bit_width;
+ u16 data_format;
+ u16 sync_mode;
+ u16 sync_src;
+ u16 nslots_per_frame;
+ u16 ctrl_data_out_enable;
+ u16 ctrl_invert_sync_pulse;
+ u16 ctrl_sync_data_delay;
+ u16 slot_width;
+ u32 slot_mask;
+} __packed;
+
+=======
+struct afe_port_hdmi_multi_ch_cfg {
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+ u16 channel_allocation; /* The default is 0 (Stereo) */
+ u16 reserved; /* must be set to 0 */
+} __packed;
+
+
+
+struct afe_port_hdmi_cfg {
+ u16 bitwidth; /* 16,24,32 */
+ u16 channel_mode; /* HDMI Stereo = 0 */
+ /* HDMI_3Point1 (4-ch) = 1 */
+ /* HDMI_5Point1 (6-ch) = 2 */
+ /* HDMI_6Point1 (8-ch) = 3 */
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+} __packed;
+
+union afe_port_config_v1 {
+ struct afe_port_hdmi_multi_ch_cfg hdmi_multi_ch;
+};
+
+>>>>>>>
+union afe_port_config {
+ struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch;
+ struct afe_param_id_slimbus_cfg slim_cfg;
+ struct afe_param_id_i2s_cfg i2s_cfg;
+<<<<<<<
+ struct afe_param_id_tdm_cfg tdm_cfg;
+} __packed;
+
+
+struct afe_clk_set {
+ uint32_t clk_set_minor_version;
+ uint32_t clk_id;
+ uint32_t clk_freq_in_hz;
+ uint16_t clk_attri;
+ uint16_t clk_root;
+ uint32_t enable;
+};
+
+struct afe_param_id_slot_mapping_cfg {
+ u32 minor_version;
+ u16 num_channels;
+ u16 bitwidth;
+ u32 data_align_type;
+ u16 ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+} __packed;
+
+struct q6afe_port {
+ wait_queue_head_t wait;
+ union afe_port_config port_cfg;
+ struct afe_param_id_slot_mapping_cfg *scfg;
+ struct aprv2_ibasic_rsp_result_t result;
+ int token;
+ int id;
+ int cfg_type;
+ struct q6afe *afe;
+ struct kref refcount;
+ struct list_head node;
+};
+
+=======
+} __packed;
+
+struct afe_lpass_clk_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_clk_cfg clk_cfg;
+} __packed;
+
+struct afe_lpass_digital_clk_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_digital_clk_cfg clk_cfg;
+} __packed;
+
+/* This param id is used to configure internal clk */
+struct q6afe_port {
+ wait_queue_head_t wait;
+ union afe_port_config port_cfg;
+ union afe_port_config_v1 port_cfgv1;
+ int token;
+ int id;
+ int cfg_type;
+ int rate;
+ struct q6afe *afe;
+ struct list_head node;
+};
+
+struct afe_audioif_config_command {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+ struct afe_port_param_data_v2 pdata;
+ union afe_port_config port;
+} __packed;
+
+
+struct afe_audioif_config_command_v1 {
+ struct apr_hdr hdr;
+ u16 port_id;
+ union afe_port_config_v1 port;
+} __packed;
+
+>>>>>>>
+struct afe_port_map {
+ int port_id;
+ int token;
+ int is_rx;
+ int is_dig_pcm;
+};
+
+<<<<<<<
+/*
+ * Mapping of virtual port IDs to DSP AFE port IDs.
+ * On B-family SoCs the DSP port IDs are consistent across SoCs;
+ * on A-family SoCs the DSP port IDs are the same as the virtual port IDs.
+ */
+
+static struct afe_port_map port_maps[AFE_PORT_MAX] = {
+ [HDMI_RX] = { AFE_PORT_ID_MULTICHAN_HDMI_RX, HDMI_RX, 1, 1},
+=======
+/* Port map of index vs real hw port ids */
+static struct afe_port_map port_maps[AFE_PORT_MAX] = {
+ [AFE_PORT_HDMI_RX] = { AFE_PORT_ID_MULTICHAN_HDMI_RX,
+ AFE_PORT_HDMI_RX, 1, 1},
+>>>>>>>
+ [SLIMBUS_0_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX,
+ SLIMBUS_0_RX, 1, 1},
+ [SLIMBUS_1_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX,
+ SLIMBUS_1_RX, 1, 1},
+ [SLIMBUS_2_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX,
+ SLIMBUS_2_RX, 1, 1},
+ [SLIMBUS_3_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX,
+ SLIMBUS_3_RX, 1, 1},
+ [SLIMBUS_4_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX,
+ SLIMBUS_4_RX, 1, 1},
+ [SLIMBUS_5_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX,
+ SLIMBUS_5_RX, 1, 1},
+<<<<<<<
+ [QUATERNARY_MI2S_RX] = { AFE_PORT_ID_QUATERNARY_MI2S_RX,
+ QUATERNARY_MI2S_RX, 1, 1},
+ [QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
+ QUATERNARY_MI2S_TX, 0, 1},
+=======
+ [SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
+ SLIMBUS_6_RX, 1, 1},
+ [PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
+ PRIMARY_MI2S_RX, 1, 1},
+ [PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
+ PRIMARY_MI2S_RX, 0, 1},
+>>>>>>>
+ [SECONDARY_MI2S_RX] = { AFE_PORT_ID_SECONDARY_MI2S_RX,
+ SECONDARY_MI2S_RX, 1, 1},
+ [SECONDARY_MI2S_TX] = { AFE_PORT_ID_SECONDARY_MI2S_TX,
+ SECONDARY_MI2S_TX, 0, 1},
+ [TERTIARY_MI2S_RX] = { AFE_PORT_ID_TERTIARY_MI2S_RX,
+ TERTIARY_MI2S_RX, 1, 1},
+ [TERTIARY_MI2S_TX] = { AFE_PORT_ID_TERTIARY_MI2S_TX,
+ TERTIARY_MI2S_TX, 0, 1},
+<<<<<<<
+ [PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
+ PRIMARY_MI2S_RX, 1, 1},
+ [PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
+ PRIMARY_MI2S_RX, 0, 1},
+ [SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
+ SLIMBUS_6_RX, 1, 1},
+};
+
+static struct q6afe_port *afe_find_port(struct q6afe *afe, int token)
+{
+ struct q6afe_port *p = NULL;
+
+ spin_lock(&afe->port_list_lock);
+ list_for_each_entry(p, &afe->port_list, node)
+ if (p->token == token)
+ break;
+
+ spin_unlock(&afe->port_list_lock);
+ return p;
+}
+
+static int afe_callback(struct apr_device *adev,
+ struct apr_client_message *data)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *res;
+=======
+ [QUATERNARY_MI2S_RX] = { AFE_PORT_ID_QUATERNARY_MI2S_RX,
+ QUATERNARY_MI2S_RX, 1, 1},
+ [QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
+ QUATERNARY_MI2S_TX, 0, 1},
+ [PRIMARY_TDM_RX_0] = { AFE_PORT_ID_PRIMARY_TDM_RX,
+ PRIMARY_TDM_RX_0, 1, 1},
+ [PRIMARY_TDM_TX_0] = { AFE_PORT_ID_PRIMARY_TDM_TX,
+ PRIMARY_TDM_TX_0, 0, 1},
+ [PRIMARY_TDM_RX_1] = { AFE_PORT_ID_PRIMARY_TDM_RX_1,
+ PRIMARY_TDM_RX_1, 1, 1},
+ [PRIMARY_TDM_TX_1] = { AFE_PORT_ID_PRIMARY_TDM_TX_1,
+ PRIMARY_TDM_TX_1, 0, 1},
+ [PRIMARY_TDM_RX_2] = { AFE_PORT_ID_PRIMARY_TDM_RX_2,
+ PRIMARY_TDM_RX_2, 1, 1},
+ [PRIMARY_TDM_TX_2] = { AFE_PORT_ID_PRIMARY_TDM_TX_2,
+ PRIMARY_TDM_TX_2, 0, 1},
+ [PRIMARY_TDM_RX_3] = { AFE_PORT_ID_PRIMARY_TDM_RX_3,
+ PRIMARY_TDM_RX_3, 1, 1},
+ [PRIMARY_TDM_TX_3] = { AFE_PORT_ID_PRIMARY_TDM_TX_3,
+ PRIMARY_TDM_TX_3, 0, 1},
+ [PRIMARY_TDM_RX_4] = { AFE_PORT_ID_PRIMARY_TDM_RX_4,
+ PRIMARY_TDM_RX_4, 1, 1},
+ [PRIMARY_TDM_TX_4] = { AFE_PORT_ID_PRIMARY_TDM_TX_4,
+ PRIMARY_TDM_TX_4, 0, 1},
+ [PRIMARY_TDM_RX_5] = { AFE_PORT_ID_PRIMARY_TDM_RX_5,
+ PRIMARY_TDM_RX_5, 1, 1},
+ [PRIMARY_TDM_TX_5] = { AFE_PORT_ID_PRIMARY_TDM_TX_5,
+ PRIMARY_TDM_TX_5, 0, 1},
+ [PRIMARY_TDM_RX_6] = { AFE_PORT_ID_PRIMARY_TDM_RX_6,
+ PRIMARY_TDM_RX_6, 1, 1},
+ [PRIMARY_TDM_TX_6] = { AFE_PORT_ID_PRIMARY_TDM_TX_6,
+ PRIMARY_TDM_TX_6, 0, 1},
+ [PRIMARY_TDM_RX_7] = { AFE_PORT_ID_PRIMARY_TDM_RX_7,
+ PRIMARY_TDM_RX_7, 1, 1},
+ [PRIMARY_TDM_TX_7] = { AFE_PORT_ID_PRIMARY_TDM_TX_7,
+ PRIMARY_TDM_TX_7, 0, 1},
+ [SECONDARY_TDM_RX_0] = { AFE_PORT_ID_SECONDARY_TDM_RX,
+ SECONDARY_TDM_RX_0, 1, 1},
+ [SECONDARY_TDM_TX_0] = { AFE_PORT_ID_SECONDARY_TDM_TX,
+ SECONDARY_TDM_TX_0, 0, 1},
+ [SECONDARY_TDM_RX_1] = { AFE_PORT_ID_SECONDARY_TDM_RX_1,
+ SECONDARY_TDM_RX_1, 1, 1},
+ [SECONDARY_TDM_TX_1] = { AFE_PORT_ID_SECONDARY_TDM_TX_1,
+ SECONDARY_TDM_TX_1, 0, 1},
+ [SECONDARY_TDM_RX_2] = { AFE_PORT_ID_SECONDARY_TDM_RX_2,
+ SECONDARY_TDM_RX_2, 1, 1},
+ [SECONDARY_TDM_TX_2] = { AFE_PORT_ID_SECONDARY_TDM_TX_2,
+ SECONDARY_TDM_TX_2, 0, 1},
+ [SECONDARY_TDM_RX_3] = { AFE_PORT_ID_SECONDARY_TDM_RX_3,
+ SECONDARY_TDM_RX_3, 1, 1},
+ [SECONDARY_TDM_TX_3] = { AFE_PORT_ID_SECONDARY_TDM_TX_3,
+ SECONDARY_TDM_TX_3, 0, 1},
+ [SECONDARY_TDM_RX_4] = { AFE_PORT_ID_SECONDARY_TDM_RX_4,
+ SECONDARY_TDM_RX_4, 1, 1},
+ [SECONDARY_TDM_TX_4] = { AFE_PORT_ID_SECONDARY_TDM_TX_4,
+ SECONDARY_TDM_TX_4, 0, 1},
+ [SECONDARY_TDM_RX_5] = { AFE_PORT_ID_SECONDARY_TDM_RX_5,
+ SECONDARY_TDM_RX_5, 1, 1},
+ [SECONDARY_TDM_TX_5] = { AFE_PORT_ID_SECONDARY_TDM_TX_5,
+ SECONDARY_TDM_TX_5, 0, 1},
+ [SECONDARY_TDM_RX_6] = { AFE_PORT_ID_SECONDARY_TDM_RX_6,
+ SECONDARY_TDM_RX_6, 1, 1},
+ [SECONDARY_TDM_TX_6] = { AFE_PORT_ID_SECONDARY_TDM_TX_6,
+ SECONDARY_TDM_TX_6, 0, 1},
+ [SECONDARY_TDM_RX_7] = { AFE_PORT_ID_SECONDARY_TDM_RX_7,
+ SECONDARY_TDM_RX_7, 1, 1},
+ [SECONDARY_TDM_TX_7] = { AFE_PORT_ID_SECONDARY_TDM_TX_7,
+ SECONDARY_TDM_TX_7, 0, 1},
+ [TERTIARY_TDM_RX_0] = { AFE_PORT_ID_TERTIARY_TDM_RX,
+ TERTIARY_TDM_RX_0, 1, 1},
+ [TERTIARY_TDM_TX_0] = { AFE_PORT_ID_TERTIARY_TDM_TX,
+ TERTIARY_TDM_TX_0, 0, 1},
+ [TERTIARY_TDM_RX_1] = { AFE_PORT_ID_TERTIARY_TDM_RX_1,
+ TERTIARY_TDM_RX_1, 1, 1},
+ [TERTIARY_TDM_TX_1] = { AFE_PORT_ID_TERTIARY_TDM_TX_1,
+ TERTIARY_TDM_TX_1, 0, 1},
+ [TERTIARY_TDM_RX_2] = { AFE_PORT_ID_TERTIARY_TDM_RX_2,
+ TERTIARY_TDM_RX_2, 1, 1},
+ [TERTIARY_TDM_TX_2] = { AFE_PORT_ID_TERTIARY_TDM_TX_2,
+ TERTIARY_TDM_TX_2, 0, 1},
+ [TERTIARY_TDM_RX_3] = { AFE_PORT_ID_TERTIARY_TDM_RX_3,
+ TERTIARY_TDM_RX_3, 1, 1},
+ [TERTIARY_TDM_TX_3] = { AFE_PORT_ID_TERTIARY_TDM_TX_3,
+ TERTIARY_TDM_TX_3, 0, 1},
+ [TERTIARY_TDM_RX_4] = { AFE_PORT_ID_TERTIARY_TDM_RX_4,
+ TERTIARY_TDM_RX_4, 1, 1},
+ [TERTIARY_TDM_TX_4] = { AFE_PORT_ID_TERTIARY_TDM_TX_4,
+ TERTIARY_TDM_TX_4, 0, 1},
+ [TERTIARY_TDM_RX_5] = { AFE_PORT_ID_TERTIARY_TDM_RX_5,
+ TERTIARY_TDM_RX_5, 1, 1},
+ [TERTIARY_TDM_TX_5] = { AFE_PORT_ID_TERTIARY_TDM_TX_5,
+ TERTIARY_TDM_TX_5, 0, 1},
+ [TERTIARY_TDM_RX_6] = { AFE_PORT_ID_TERTIARY_TDM_RX_6,
+ TERTIARY_TDM_RX_6, 1, 1},
+ [TERTIARY_TDM_TX_6] = { AFE_PORT_ID_TERTIARY_TDM_TX_6,
+ TERTIARY_TDM_TX_6, 0, 1},
+ [TERTIARY_TDM_RX_7] = { AFE_PORT_ID_TERTIARY_TDM_RX_7,
+ TERTIARY_TDM_RX_7, 1, 1},
+ [TERTIARY_TDM_TX_7] = { AFE_PORT_ID_TERTIARY_TDM_TX_7,
+ TERTIARY_TDM_TX_7, 0, 1},
+ [QUATERNARY_TDM_RX_0] = { AFE_PORT_ID_QUATERNARY_TDM_RX,
+ QUATERNARY_TDM_RX_0, 1, 1},
+ [QUATERNARY_TDM_TX_0] = { AFE_PORT_ID_QUATERNARY_TDM_TX,
+ QUATERNARY_TDM_TX_0, 0, 1},
+ [QUATERNARY_TDM_RX_1] = { AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+ QUATERNARY_TDM_RX_1, 1, 1},
+ [QUATERNARY_TDM_TX_1] = { AFE_PORT_ID_QUATERNARY_TDM_TX_1,
+ QUATERNARY_TDM_TX_1, 0, 1},
+ [QUATERNARY_TDM_RX_2] = { AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+ QUATERNARY_TDM_RX_2, 1, 1},
+ [QUATERNARY_TDM_TX_2] = { AFE_PORT_ID_QUATERNARY_TDM_TX_2,
+ QUATERNARY_TDM_TX_2, 0, 1},
+ [QUATERNARY_TDM_RX_3] = { AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+ QUATERNARY_TDM_RX_3, 1, 1},
+ [QUATERNARY_TDM_TX_3] = { AFE_PORT_ID_QUATERNARY_TDM_TX_3,
+ QUATERNARY_TDM_TX_3, 0, 1},
+ [QUATERNARY_TDM_RX_4] = { AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+ QUATERNARY_TDM_RX_4, 1, 1},
+ [QUATERNARY_TDM_TX_4] = { AFE_PORT_ID_QUATERNARY_TDM_TX_4,
+ QUATERNARY_TDM_TX_4, 0, 1},
+ [QUATERNARY_TDM_RX_5] = { AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+ QUATERNARY_TDM_RX_5, 1, 1},
+ [QUATERNARY_TDM_TX_5] = { AFE_PORT_ID_QUATERNARY_TDM_TX_5,
+ QUATERNARY_TDM_TX_5, 0, 1},
+ [QUATERNARY_TDM_RX_6] = { AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+ QUATERNARY_TDM_RX_6, 1, 1},
+ [QUATERNARY_TDM_TX_6] = { AFE_PORT_ID_QUATERNARY_TDM_TX_6,
+ QUATERNARY_TDM_TX_6, 0, 1},
+ [QUATERNARY_TDM_RX_7] = { AFE_PORT_ID_QUATERNARY_TDM_RX_7,
+ QUATERNARY_TDM_RX_7, 1, 1},
+ [QUATERNARY_TDM_TX_7] = { AFE_PORT_ID_QUATERNARY_TDM_TX_7,
+ QUATERNARY_TDM_TX_7, 0, 1},
+ [QUINARY_TDM_RX_0] = { AFE_PORT_ID_QUINARY_TDM_RX,
+ QUINARY_TDM_RX_0, 1, 1},
+ [QUINARY_TDM_TX_0] = { AFE_PORT_ID_QUINARY_TDM_TX,
+ QUINARY_TDM_TX_0, 0, 1},
+ [QUINARY_TDM_RX_1] = { AFE_PORT_ID_QUINARY_TDM_RX_1,
+ QUINARY_TDM_RX_1, 1, 1},
+ [QUINARY_TDM_TX_1] = { AFE_PORT_ID_QUINARY_TDM_TX_1,
+ QUINARY_TDM_TX_1, 0, 1},
+ [QUINARY_TDM_RX_2] = { AFE_PORT_ID_QUINARY_TDM_RX_2,
+ QUINARY_TDM_RX_2, 1, 1},
+ [QUINARY_TDM_TX_2] = { AFE_PORT_ID_QUINARY_TDM_TX_2,
+ QUINARY_TDM_TX_2, 0, 1},
+ [QUINARY_TDM_RX_3] = { AFE_PORT_ID_QUINARY_TDM_RX_3,
+ QUINARY_TDM_RX_3, 1, 1},
+ [QUINARY_TDM_TX_3] = { AFE_PORT_ID_QUINARY_TDM_TX_3,
+ QUINARY_TDM_TX_3, 0, 1},
+ [QUINARY_TDM_RX_4] = { AFE_PORT_ID_QUINARY_TDM_RX_4,
+ QUINARY_TDM_RX_4, 1, 1},
+ [QUINARY_TDM_TX_4] = { AFE_PORT_ID_QUINARY_TDM_TX_4,
+ QUINARY_TDM_TX_4, 0, 1},
+ [QUINARY_TDM_RX_5] = { AFE_PORT_ID_QUINARY_TDM_RX_5,
+ QUINARY_TDM_RX_5, 1, 1},
+ [QUINARY_TDM_TX_5] = { AFE_PORT_ID_QUINARY_TDM_TX_5,
+ QUINARY_TDM_TX_5, 0, 1},
+ [QUINARY_TDM_RX_6] = { AFE_PORT_ID_QUINARY_TDM_RX_6,
+ QUINARY_TDM_RX_6, 1, 1},
+ [QUINARY_TDM_TX_6] = { AFE_PORT_ID_QUINARY_TDM_TX_6,
+ QUINARY_TDM_TX_6, 0, 1},
+ [QUINARY_TDM_RX_7] = { AFE_PORT_ID_QUINARY_TDM_RX_7,
+ QUINARY_TDM_RX_7, 1, 1},
+ [QUINARY_TDM_TX_7] = { AFE_PORT_ID_QUINARY_TDM_TX_7,
+ QUINARY_TDM_TX_7, 0, 1},
+};
+
+static void q6afe_port_free(struct kref *ref)
+{
+ struct q6afe_port *port;
+ struct q6afe *afe;
+ unsigned long flags;
+
+ port = container_of(ref, struct q6afe_port, refcount);
+ afe = port->afe;
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_del(&port->node);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ kfree(port->scfg);
+ kfree(port);
+}
+
+static struct q6afe_port *q6afe_find_port(struct q6afe *afe, int token)
+{
+ struct q6afe_port *p = NULL;
+ struct q6afe_port *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_for_each_entry(p, &afe->port_list, node)
+ if (p->token == token) {
+ ret = p;
+ kref_get(&p->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+ return ret;
+}
+
+static int q6afe_callback(struct apr_device *adev, struct apr_resp_pkt *data)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+ struct aprv2_ibasic_rsp_result_t *res;
+ struct apr_hdr *hdr = &data->hdr;
+>>>>>>>
+ struct q6afe_port *port;
+
+ if (!data->payload_size)
+ return 0;
+
+ res = data->payload;
+<<<<<<<
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ if (res->status) {
+ afe->status = res->status;
+ dev_err(afe->dev, "cmd = 0x%x returned error = 0x%x\n",
+ res->opcode, res->status);
+ }
+
+ switch (res->opcode) {
+ case AFE_PORT_CMD_START:
+ case AFE_PORT_CMD_STOP:
+ case AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG:
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ afe->state = AFE_CMD_RESP_AVAIL;
+ port = afe_find_port(afe, data->token);
+ if (port)
+ wake_up(&port->wait);
+
+=======
+ switch (hdr->opcode) {
+ case APR_BASIC_RSP_RESULT: {
+ if (res->status) {
+ dev_err(afe->dev, "cmd = 0x%x returned error = 0x%x\n",
+ res->opcode, res->status);
+ }
+ switch (res->opcode) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ case AFE_SVC_CMD_SET_PARAM:
+ port = q6afe_find_port(afe, hdr->token);
+ if (port) {
+ port->result = *res;
+ wake_up(&port->wait);
+ kref_put(&port->refcount, q6afe_port_free);
+ }
+>>>>>>>
+ break;
+ default:
+ dev_err(afe->dev, "Unknown cmd 0x%x\n", res->opcode);
+ break;
+ }
+ }
+<<<<<<<
+=======
+ break;
+ default:
+ break;
+ }
+>>>>>>>
+
+ return 0;
+}
+
+/**
+ * q6afe_get_port_id() - Get port id from a given port index
+ *
+ * @index: port index
+ *
+ * Return: Will be a negative value on error or a valid port_id on success
+ */
+int q6afe_get_port_id(int index)
+{
+	if (index < 0 || index >= AFE_PORT_MAX)
+ return -EINVAL;
+
+ return port_maps[index].port_id;
+}
+EXPORT_SYMBOL_GPL(q6afe_get_port_id);
+
+<<<<<<<
+static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
+ struct q6afe_port *port)
+{
+ wait_queue_head_t *wait = &port->wait;
+ struct apr_hdr *hdr = &pkt->hdr;
+ int ret;
+
+ mutex_lock(&afe->lock);
+ port->result.opcode = 0;
+ port->result.status = 0;
+
+ ret = apr_send_pkt(afe->apr, pkt);
+ if (ret < 0) {
+ dev_err(afe->dev, "packet not transmitted (%d)\n", ret);
+=======
+static int afe_apr_send_pkt(struct q6afe *afe, void *data,
+ wait_queue_head_t *wait)
+{
+ int ret;
+
+ mutex_lock(&afe->lock);
+ afe->status = 0;
+ afe->state = AFE_CMD_RESP_NONE;
+
+ ret = apr_send_pkt(afe->apr, data);
+ if (ret < 0) {
+ dev_err(afe->dev, "packet not transmitted\n");
+>>>>>>>
+ ret = -EINVAL;
+ goto err;
+ }
+
+<<<<<<<
+ ret = wait_event_timeout(*wait, (afe->state == AFE_CMD_RESP_AVAIL),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (afe->status > 0) {
+ dev_err(afe->dev, "DSP returned error[%s]\n",
+ q6dsp_strerror(afe->status));
+ ret = q6dsp_errno(afe->status);
+=======
+ ret = wait_event_timeout(*wait, (port->result.opcode == hdr->opcode),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (port->result.status > 0) {
+ dev_err(afe->dev, "DSP returned error[%x]\n",
+ port->result.status);
+ ret = -EINVAL;
+>>>>>>>
+ } else {
+ ret = 0;
+ }
+
+err:
+ mutex_unlock(&afe->lock);
+
+ return ret;
+}
+
+<<<<<<<
+static int afe_send_cmd_port_start(struct q6afe_port *port)
+{
+ u16 port_id = port->id;
+ struct afe_port_cmd_device_start start;
+ struct q6afe *afe = port->afe;
+ int ret;
+
+ start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ start.hdr.pkt_size = sizeof(start);
+ start.hdr.src_port = 0;
+ start.hdr.dest_port = 0;
+ start.hdr.token = port->token;
+ start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+ start.port_id = port_id;
+
+ ret = afe_apr_send_pkt(afe, &start, &port->wait);
+=======
+static int q6afe_port_set_param(struct q6afe_port *port, void *data,
+ int param_id, int module_id, int psize)
+{
+ struct afe_svc_cmd_set_param *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+>>>>>>>
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+<<<<<<<
+=======
+ kfree(pkt);
+>>>>>>>
+ return ret;
+}
+
+static int q6afe_port_set_param_v2(struct q6afe_port *port, void *data,
+<<<<<<<
+ int param_id, int module_id, int psize)
+{
+ struct afe_port_cmd_set_param_v2 *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ u16 port_id = port->id;
+ int ret, pkt_size;
+ void *p, *pl;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize;
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ param = p + APR_HDR_SIZE;
+ pdata = p + APR_HDR_SIZE + sizeof(*param);
+ pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
+ memcpy(pl, data, psize);
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+=======
+ int param_id, int psize)
+{
+ struct apr_hdr *hdr;
+ struct afe_port_cmd_set_param_v2 *param;
+ struct afe_port_param_data_v2 *pdata;
+ struct q6afe *afe = port->afe;
+ u16 port_id = port->id;
+ int ret;
+
+ hdr = data;
+ param = data + sizeof(*hdr);
+ pdata = data + sizeof(*hdr) + sizeof(*param);
+
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(*hdr) + sizeof(*param) +
+ sizeof(*pdata) + psize;
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port->token;
+ hdr->opcode = AFE_PORT_CMD_SET_PARAM_V2;
+>>>>>>>
+ param->port_id = port_id;
+ param->payload_size = sizeof(*pdata) + psize;
+ param->payload_address_lsw = 0x00;
+ param->payload_address_msw = 0x00;
+ param->mem_map_handle = 0x00;
+<<<<<<<
+ pdata->module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, data, &port->wait);
+=======
+ pdata->module_id = module_id;
+ pdata->param_id = param_id;
+ pdata->param_size = psize;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+>>>>>>>
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+<<<<<<<
+ kfree(pkt);
+=======
+
+>>>>>>>
+ return ret;
+}
+
+static int q6afe_set_lpass_clock(struct q6afe_port *port,
+ struct afe_clk_cfg *cfg)
+{
+<<<<<<<
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_LPAIF_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+}
+
+static int q6afe_set_lpass_clock_v2(struct q6afe_port *port,
+ struct afe_clk_set *cfg)
+{
+ return q6afe_port_set_param(port, cfg, AFE_PARAM_ID_CLOCK_SET,
+ AFE_MODULE_CLOCK_SET, sizeof(*cfg));
+=======
+ struct afe_lpass_clk_config_command clk_cfg = {0};
+ int param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG;
+ struct q6afe *afe = port->afe;
+
+ if (!cfg) {
+ dev_err(afe->dev, "clock cfg is NULL\n");
+ return -EINVAL;
+ }
+
+ clk_cfg.clk_cfg = *cfg;
+
+ return q6afe_port_set_param_v2(port, &clk_cfg, param_id, sizeof(*cfg));
+>>>>>>>
+}
+
+static int q6afe_set_digital_codec_core_clock(struct q6afe_port *port,
+ struct afe_digital_clk_cfg *cfg)
+{
+<<<<<<<
+ return q6afe_port_set_param_v2(port, cfg,
+ AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(*cfg));
+=======
+ struct afe_lpass_digital_clk_config_command clk_cfg = {0};
+ int param_id = AFE_PARAM_ID_INTERNAL_DIGITAL_CDC_CLK_CONFIG;
+ struct q6afe *afe = port->afe;
+
+ if (!cfg) {
+ dev_err(afe->dev, "clock cfg is NULL\n");
+ return -EINVAL;
+ }
+
+ clk_cfg.clk_cfg = *cfg;
+
+ return q6afe_port_set_param_v2(port, &clk_cfg, param_id, sizeof(*cfg));
+>>>>>>>
+}
+
+int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int clk_src, int clk_root,
+ unsigned int freq, int dir)
+{
+ struct afe_clk_cfg ccfg = {0,};
+<<<<<<<
+=======
+ struct afe_clk_set cset = {0,};
+>>>>>>>
+ struct afe_digital_clk_cfg dcfg = {0,};
+ int ret;
+
+ switch (clk_id) {
+ case LPAIF_DIG_CLK:
+ dcfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ dcfg.clk_val = freq;
+ dcfg.clk_root = clk_root;
+ ret = q6afe_set_digital_codec_core_clock(port, &dcfg);
+ break;
+ case LPAIF_BIT_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val1 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK1_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+
+ case LPAIF_OSR_CLK:
+ ccfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ ccfg.clk_val2 = freq;
+ ccfg.clk_src = clk_src;
+ ccfg.clk_root = clk_root;
+ ccfg.clk_set_mode = Q6AFE_LPASS_MODE_CLK2_VALID;
+ ret = q6afe_set_lpass_clock(port, &ccfg);
+ break;
+<<<<<<<
+=======
+ case Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT ... Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR:
+ case Q6AFE_LPASS_CLK_ID_MCLK_1 ... Q6AFE_LPASS_CLK_ID_INT_MCLK_1:
+ case Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT ... Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT:
+ cset.clk_set_minor_version = AFE_API_VERSION_CLOCK_SET;
+ cset.clk_id = clk_id;
+ cset.clk_freq_in_hz = freq;
+ cset.clk_attri = clk_src;
+ cset.clk_root = clk_root;
+ cset.enable = !!freq;
+ ret = q6afe_set_lpass_clock_v2(port, &cset);
+ break;
+>>>>>>>
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_set_sysclk);
+
+<<<<<<<
+/**
+ * q6afe_port_stop() - Stop an afe port
+ *
+ * @port: Instance of port to stop
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6afe_port_stop(struct q6afe_port *port)
+{
+ struct afe_port_cmd_device_stop *stop;
+ struct q6afe *afe = port->afe;
+ struct apr_pkt *pkt;
+ int port_id = port->id;
+ int ret = 0;
+ int index, pkt_size;
+ void *p;
+=======
+static int q6afev2_port_start(struct q6afe_port *port)
+{
+ union afe_port_config *afe_config = &port->port_cfg;
+ struct afe_audioif_config_command config = {0,};
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ int ret, param_id = port->cfg_type;
+
+ config.port = *afe_config;
+
+ ret = q6afe_port_set_param_v2(port, &config, param_id,
+ sizeof(*afe_config));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+ return afe_send_cmd_port_start(port);
+}
+
+static int q6afev1_port_start(struct q6afe_port *port)
+{
+ union afe_port_config_v1 *afe_config = &port->port_cfgv1;
+ struct afe_port_start_command_v1 start;
+ struct afe_audioif_config_command_v1 config = {0,};
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ struct apr_hdr *hdr;
+ int ret;
+
+ hdr = &config.hdr;
+
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(config);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port_id;
+ hdr->opcode = port->cfg_type;
+ config.port_id = port_id;
+ config.port = *afe_config;
+
+ ret = afe_apr_send_pkt(afe, &config, &port->wait);
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+
+ hdr = &start.hdr;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ hdr->pkt_size = sizeof(start);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = port_id;
+ hdr->opcode = AFE_PORT_CMD_START;
+ start.port_id = port_id;
+ start.gain = 0x2000;
+ start.sample_rate = port->rate;
+
+ return afe_apr_send_pkt(afe, &start, &port->wait);
+}
+
+static int q6afev1_port_stop(struct q6afe_port *port)
+{
+ int port_id = port->id;
+ struct afe_port_stop_command_v1 stop;
+ struct q6afe *afe = port->afe;
+ int ret = 0;
+ int index = 0;
+>>>>>>>
+
+ port_id = port->id;
+ index = port->token;
+	if (index < 0 || index >= AFE_PORT_MAX) {
+ dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
+ return -EINVAL;
+ }
+
+<<<<<<<
+ pkt_size = APR_HDR_SIZE + sizeof(*stop);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ stop = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = index;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop->port_id = port_id;
+ stop->reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_stop);
+
+/**
+=======
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = port_id;
+ stop.hdr.opcode = AFE_PORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, &stop, &port->wait);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ return ret;
+}
+
+static int q6afev2_port_stop(struct q6afe_port *port)
+{
+ int port_id = port->id;
+ struct afe_port_cmd_device_stop stop;
+ struct q6afe *afe = port->afe;
+ int ret = 0;
+ int index = 0;
+
+ port_id = port->id;
+ index = port->token;
+	if (index < 0 || index >= AFE_PORT_MAX) {
+ dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
+ return -EINVAL;
+ }
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = index;
+ stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = afe_apr_send_pkt(afe, &stop, &port->wait);
+ if (ret)
+ dev_err(afe->dev, "AFE close failed %d\n", ret);
+
+ return ret;
+}
+/**
+ * q6afe_port_stop() - Stop an afe port
+ *
+ * @port: Instance of port to stop
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6afe_port_stop(struct q6afe_port *port)
+{
+ return port->afe->ops->port_stop(port);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_stop);
+
+/**
+ * q6afe_set_dai_data() - set dai private data
+ *
+ * @dev: Pointer to afe device.
+ * @data: dai private data
+ *
+ */
+void q6afe_set_dai_data(struct device *dev, void *data)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ afe->dai_data = data;
+}
+EXPORT_SYMBOL_GPL(q6afe_set_dai_data);
+
+/**
+ * q6afe_get_dai_data() - get dai private data
+ *
+ * @dev: Pointer to afe device.
+ *
+ * Return: pointer to dai private data
+ */
+void *q6afe_get_dai_data(struct device *dev)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ return afe->dai_data;
+}
+EXPORT_SYMBOL_GPL(q6afe_get_dai_data);
+
+/**
+>>>>>>>
+ * q6afe_slim_port_prepare() - Prepare slim afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: SLIM configuration for the afe port
+ *
+ */
+void q6afe_slim_port_prepare(struct q6afe_port *port,
+ struct q6afe_slim_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->slim_cfg.sb_cfg_minor_version = AFE_API_VERSION_SLIMBUS_CONFIG;
+ pcfg->slim_cfg.sample_rate = cfg->sample_rate;
+ pcfg->slim_cfg.bit_width = cfg->bit_width;
+ pcfg->slim_cfg.num_channels = cfg->num_channels;
+ pcfg->slim_cfg.data_format = cfg->data_format;
+ pcfg->slim_cfg.shared_ch_mapping[0] = cfg->ch_mapping[0];
+ pcfg->slim_cfg.shared_ch_mapping[1] = cfg->ch_mapping[1];
+ pcfg->slim_cfg.shared_ch_mapping[2] = cfg->ch_mapping[2];
+ pcfg->slim_cfg.shared_ch_mapping[3] = cfg->ch_mapping[3];
+
+}
+EXPORT_SYMBOL_GPL(q6afe_slim_port_prepare);
+
+<<<<<<<
+/**
+ * q6afe_tdm_port_prepare() - Prepare tdm afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: TDM configuration for the afe port
+ *
+ */
+void q6afe_tdm_port_prepare(struct q6afe_port *port,
+ struct q6afe_tdm_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->tdm_cfg.tdm_cfg_minor_version = AFE_API_VERSION_TDM_CONFIG;
+ pcfg->tdm_cfg.num_channels = cfg->num_channels;
+ pcfg->tdm_cfg.sample_rate = cfg->sample_rate;
+ pcfg->tdm_cfg.bit_width = cfg->bit_width;
+ pcfg->tdm_cfg.data_format = cfg->data_format;
+ pcfg->tdm_cfg.sync_mode = cfg->sync_mode;
+ pcfg->tdm_cfg.sync_src = cfg->sync_src;
+ pcfg->tdm_cfg.nslots_per_frame = cfg->nslots_per_frame;
+
+ pcfg->tdm_cfg.slot_width = cfg->slot_width;
+ pcfg->tdm_cfg.slot_mask = cfg->slot_mask;
+ port->scfg = kzalloc(sizeof(*port->scfg), GFP_KERNEL);
+ if (!port->scfg)
+ return;
+
+ port->scfg->minor_version = AFE_API_VERSION_SLOT_MAPPING_CONFIG;
+ port->scfg->num_channels = cfg->num_channels;
+ port->scfg->bitwidth = cfg->bit_width;
+ port->scfg->data_align_type = cfg->data_align_type;
+ memcpy(port->scfg->ch_mapping, cfg->ch_mapping,
+ sizeof(u16) * AFE_PORT_MAX_AUDIO_CHAN_CNT);
+}
+EXPORT_SYMBOL_GPL(q6afe_tdm_port_prepare);
+=======
+void q6afev1_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+ union afe_port_config_v1 *pcfg = &port->port_cfgv1;
+
+ pcfg->hdmi_multi_ch.data_type = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.reserved = 0;
+ port->rate = cfg->sample_rate;
+ //FIXME RATE
+// pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+// pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+}
+
+void q6afev2_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->hdmi_multi_ch.hdmi_cfg_minor_version =
+ AFE_API_VERSION_HDMI_CONFIG;
+ pcfg->hdmi_multi_ch.datatype = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+ pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+}
+>>>>>>>
+
+/**
+ * q6afe_hdmi_port_prepare() - Prepare hdmi afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: HDMI configuration for the afe port
+ *
+ */
+void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg)
+{
+<<<<<<<
+ port->afe->ops->hdmi_prepare(port, cfg);
+=======
+ union afe_port_config *pcfg = &port->port_cfg;
+
+ pcfg->hdmi_multi_ch.hdmi_cfg_minor_version =
+ AFE_API_VERSION_HDMI_CONFIG;
+ pcfg->hdmi_multi_ch.datatype = cfg->datatype;
+ pcfg->hdmi_multi_ch.channel_allocation = cfg->channel_allocation;
+ pcfg->hdmi_multi_ch.sample_rate = cfg->sample_rate;
+ pcfg->hdmi_multi_ch.bit_width = cfg->bit_width;
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6afe_hdmi_port_prepare);
+
+/**
+ * q6afe_i2s_port_prepare() - Prepare i2s afe port.
+ *
+ * @port: Instance of afe port
+ * @cfg: I2S configuration for the afe port
+<<<<<<<
+ *
+ */
+void q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+=======
+ * Return: Will be a negative value on error and zero on success.
+ */
+int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
+{
+ union afe_port_config *pcfg = &port->port_cfg;
+ struct device *dev = port->afe->dev;
+ int num_sd_lines;
+>>>>>>>
+
+ pcfg->i2s_cfg.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+ pcfg->i2s_cfg.sample_rate = cfg->sample_rate;
+ pcfg->i2s_cfg.bit_width = cfg->bit_width;
+ pcfg->i2s_cfg.data_format = AFE_LINEAR_PCM_DATA;
+
+ switch (cfg->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* CPU is slave */
+ pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL;
+ break;
+ default:
+ break;
+ }
+
+<<<<<<<
+ num_sd_lines = hweight_long(cfg->sd_line_mask);
+
+ switch (num_sd_lines) {
+ case 0:
+ dev_err(dev, "no line is assigned\n");
+ return -EINVAL;
+ case 1:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_SD1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD1;
+ break;
+ case AFE_PORT_I2S_SD2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ case AFE_PORT_I2S_SD3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD3;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD01;
+ break;
+ case AFE_PORT_I2S_SD2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD23;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 3:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_6CHS;
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ switch (cfg->sd_line_mask) {
+ case AFE_PORT_I2S_SD0_1_2_3_MASK:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_8CHS;
+
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(dev, "Invalid SD lines\n");
+ return -EINVAL;
+ }
+
+ switch (cfg->num_channels) {
+ case 1:
+ case 2:
+ switch (pcfg->i2s_cfg.channel_mode) {
+ case AFE_PORT_I2S_QUAD01:
+ case AFE_PORT_I2S_6CHS:
+ case AFE_PORT_I2S_8CHS:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case AFE_PORT_I2S_QUAD23:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD2;
+ break;
+ }
+
+ if (cfg->num_channels == 2)
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_STEREO;
+ else
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_MONO;
+
+ break;
+ case 3:
+ case 4:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_QUAD01) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 5:
+ case 6:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_6CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ case 7:
+ case 8:
+ if (pcfg->i2s_cfg.channel_mode < AFE_PORT_I2S_8CHS) {
+ dev_err(dev, "Invalid Channel mode\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+=======
+ switch (cfg->num_channels) {
+ case 1:
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_MONO;
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ break;
+ case 2:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_SD0;
+ pcfg->i2s_cfg.mono_stereo = AFE_PORT_I2S_STEREO;
+ break;
+ case 3:
+ case 4:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_QUAD01;
+ break;
+ case 5:
+ case 6:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_6CHS;
+ break;
+ case 7:
+ case 8:
+ pcfg->i2s_cfg.channel_mode = AFE_PORT_I2S_8CHS;
+ break;
+ default:
+ break;
+ }
+>>>>>>>
+}
+EXPORT_SYMBOL_GPL(q6afe_i2s_port_prepare);
+
+/**
+ * q6afe_port_start() - Start an afe port
+ *
+ * @port: Instance of port to start
+ *
+ * Return: Will be a negative value on error or zero on success.
+ */
+int q6afe_port_start(struct q6afe_port *port)
+{
+<<<<<<<
+ return port->afe->ops->port_start(port);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_start);
+
+struct q6afe_port *q6afev1_port_get_from_id(struct device *dev, int port_id)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+ struct q6afe_port *port;
+ int cfg_type;
+
+ if (port_id < 0 || port_id > AFE_PORT_MAX) {
+ dev_err(dev, "AFE port token[%d] invalid!\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ switch (port_id) {
+ case AFE_PORT_HDMI_RX:
+ cfg_type = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
+ break;
+ default:
+ dev_err(dev, "Invalid port id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&port->wait);
+
+ port->token = port_id;
+ port->id = port_id;
+ port->afe = afe;
+ port->cfg_type = cfg_type;
+
+ spin_lock(&afe->port_list_lock);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock(&afe->port_list_lock);
+
+ return port;
+
+}
+
+struct q6afe_port *q6afev2_port_get_from_id(struct device *dev, int id)
+{
+ int port_id;
+ struct q6afe *afe = dev_get_drvdata(dev);
+ struct q6afe_port *port;
+=======
+ struct afe_port_cmd_device_start *start;
+ struct q6afe *afe = port->afe;
+ int port_id = port->id;
+ int ret, param_id = port->cfg_type;
+ struct apr_pkt *pkt;
+ int pkt_size;
+ void *p;
+
+ ret = q6afe_port_set_param_v2(port, &port->port_cfg, param_id,
+ AFE_MODULE_AUDIO_DEV_INTERFACE,
+ sizeof(port->port_cfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+
+ if (port->scfg) {
+ ret = q6afe_port_set_param_v2(port, port->scfg,
+ AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG,
+ AFE_MODULE_TDM, sizeof(*port->scfg));
+ if (ret) {
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+ return ret;
+ }
+ }
+
+ pkt_size = APR_HDR_SIZE + sizeof(*start);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ start = p + APR_HDR_SIZE;
+
+ pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ pkt->hdr.pkt_size = pkt_size;
+ pkt->hdr.src_port = 0;
+ pkt->hdr.dest_port = 0;
+ pkt->hdr.token = port->token;
+ pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+
+ start->port_id = port_id;
+
+ ret = afe_apr_send_pkt(afe, pkt, port);
+ if (ret)
+ dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n",
+ port_id, ret);
+
+ kfree(pkt);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(q6afe_port_start);
+
+/**
+ * q6afe_port_get_from_id() - Get port instance from a port id
+ *
+ * @dev: Pointer to afe child device.
+ * @id: port id
+ *
+ * Return: Will be an error pointer on error or a valid afe port
+ * on success.
+ */
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
+{
+ int port_id;
+ struct q6afe *afe = dev_get_drvdata(dev->parent);
+ struct q6afe_port *port;
+ unsigned long flags;
+>>>>>>>
+ int cfg_type;
+
+	if (id < 0 || id >= AFE_PORT_MAX) {
+ dev_err(dev, "AFE port token[%d] invalid!\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
+<<<<<<<
+=======
+	/* if the port is bound/unbound multiple times before the callback finishes */
+ port = q6afe_find_port(afe, id);
+ if (port) {
+ dev_err(dev, "AFE Port already open\n");
+ return port;
+ }
+
+>>>>>>>
+ port_id = port_maps[id].port_id;
+
+ switch (port_id) {
+ case AFE_PORT_ID_MULTICHAN_HDMI_RX:
+ cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+ break;
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX:
+ cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+ break;
+
+ case AFE_PORT_ID_PRIMARY_MI2S_RX:
+ case AFE_PORT_ID_PRIMARY_MI2S_TX:
+ case AFE_PORT_ID_SECONDARY_MI2S_RX:
+ case AFE_PORT_ID_SECONDARY_MI2S_TX:
+ case AFE_PORT_ID_TERTIARY_MI2S_RX:
+ case AFE_PORT_ID_TERTIARY_MI2S_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+ break;
+<<<<<<<
+=======
+ case AFE_PORT_ID_PRIMARY_TDM_RX ... AFE_PORT_ID_QUINARY_TDM_TX_7:
+ cfg_type = AFE_PARAM_ID_TDM_CONFIG;
+ break;
+
+>>>>>>>
+ default:
+ dev_err(dev, "Invalid port id 0x%x\n", port_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+<<<<<<<
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+=======
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+>>>>>>>
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&port->wait);
+
+ port->token = id;
+ port->id = port_id;
+ port->afe = afe;
+ port->cfg_type = cfg_type;
+<<<<<<<
+ kref_init(&port->refcount);
+
+ spin_lock_irqsave(&afe->port_list_lock, flags);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock_irqrestore(&afe->port_list_lock, flags);
+=======
+
+ spin_lock(&afe->port_list_lock);
+ list_add_tail(&port->node, &afe->port_list);
+ spin_unlock(&afe->port_list_lock);
+>>>>>>>
+
+ return port;
+
+}
+<<<<<<<
+=======
+/**
+ * q6afe_port_get_from_id() - Get port instance from a port id
+ *
+ * @dev: Pointer to afe child device.
+ * @id: port id
+ *
+ * Return: Will be an error pointer on error or a valid afe port
+ * on success.
+ */
+struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
+{
+ struct q6afe *afe = dev_get_drvdata(dev);
+
+ return afe->ops->port_get_from_id(dev, id);
+}
+
+>>>>>>>
+EXPORT_SYMBOL_GPL(q6afe_port_get_from_id);
+
+/**
+ * q6afe_port_put() - Release port reference
+ *
+ * @port: Instance of port to put
+ */
+void q6afe_port_put(struct q6afe_port *port)
+{
+<<<<<<<
+ kref_put(&port->refcount, q6afe_port_free);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_put);
+
+static int q6afe_probe(struct apr_device *adev)
+{
+ struct q6afe *afe;
+ struct device *dev = &adev->dev;
+ struct device_node *dais_np;
+=======
+ struct q6afe *afe = port->afe;
+
+ spin_lock(&afe->port_list_lock);
+ list_del(&port->node);
+ spin_unlock(&afe->port_list_lock);
+}
+EXPORT_SYMBOL_GPL(q6afe_port_put);
+
+struct q6afe_ops q6afev1_ops = {
+ .port_stop = q6afev1_port_stop,
+ .port_start = q6afev1_port_start,
+ .hdmi_prepare = q6afev1_hdmi_port_prepare,
+ .port_get_from_id = q6afev1_port_get_from_id,
+};
+
+struct q6afe_ops q6afev2_ops = {
+ .port_stop = q6afev2_port_stop,
+ .port_start = q6afev2_port_start,
+ .hdmi_prepare = q6afev2_hdmi_port_prepare,
+ .port_get_from_id = q6afev2_port_get_from_id,
+};
+
+static int q6afev2_probe(struct apr_device *adev)
+{
+ struct q6afe *afe;
+ struct device *dev = &adev->dev;
+>>>>>>>
+
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+
+<<<<<<<
+ adev->version = q6core_get_svc_version(adev->svc_id);
+
+ if (APR_SVC_MAJOR_VERSION(adev->version) >= 0x20)
+ afe->ops = &q6afev2_ops;
+ else if (!APR_SVC_MAJOR_VERSION(adev->version))
+ afe->ops = &q6afev1_ops;
+
+ if (!afe->ops) {
+ dev_err(&adev->dev, "Unsupported AFE version\n");
+ return -EINVAL;
+ }
+
+=======
+ q6core_get_svc_api_info(adev->svc_id, &afe->ainfo);
+>>>>>>>
+ afe->apr = adev;
+ mutex_init(&afe->lock);
+ afe->dev = dev;
+ INIT_LIST_HEAD(&afe->port_list);
+ spin_lock_init(&afe->port_list_lock);
+
+ dev_set_drvdata(dev, afe);
+
+<<<<<<<
+ dais_np = of_get_child_by_name(dev->of_node, "dais");
+ if (dais_np) {
+ afe->pdev_dais = of_platform_device_create(dais_np,
+ "q6afe-dai", dev);
+ of_node_put(dais_np);
+ }
+
+ return 0;
+}
+
+static int q6afe_remove(struct apr_device *adev)
+{
+ struct q6afe *afe = dev_get_drvdata(&adev->dev);
+
+ if (afe->pdev_dais)
+ of_platform_device_destroy(&afe->pdev_dais->dev, NULL);
+
+ return 0;
+=======
+ return q6afe_dai_dev_probe(dev);
+}
+
+static int q6afev2_remove(struct apr_device *adev)
+{
+ return q6afe_dai_dev_remove(&adev->dev);
+>>>>>>>
+}
+
+static const struct of_device_id q6afe_device_id[] = {
+ { .compatible = "qcom,q6afe" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6afe_device_id);
+
+static struct apr_driver qcom_q6afe_driver = {
+<<<<<<<
+ .probe = q6afe_probe,
+ .remove = q6afe_remove,
+ .callback = q6afe_callback,
+=======
+ .probe = q6afev2_probe,
+ .remove = q6afev2_remove,
+ .callback = afe_callback,
+>>>>>>>
+ .driver = {
+ .name = "qcom-q6afe",
+ .of_match_table = of_match_ptr(q6afe_device_id),
+
+ },
+};
+
+module_apr_driver(qcom_q6afe_driver);
+MODULE_DESCRIPTION("Q6 Audio Front End");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/postimage b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/postimage
new file mode 100644
index 0000000..c5d52e2
--- /dev/null
+++ b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/postimage
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_APR_H_
+#define __QCOM_APR_H_
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+extern struct bus_type aprbus;
+
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+
+/*
+ * HEADER field
+ * version:0:3
+ * header_size : 4:7
+ * message_type : 8:9
+ * reserved: 10:15
+ */
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+#define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(APR_HDR_SIZE), \
+ APR_PKT_VER)
+/* Version */
+#define APR_PKT_VER 0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT 0x0
+#define APR_MSG_TYPE_CMD_RSP 0x1
+#define APR_MSG_TYPE_SEQ_CMD 0x2
+#define APR_MSG_TYPE_NSEQ_CMD 0x3
+#define APR_MSG_TYPE_MAX 0x04
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED 0x000100BE
+
+struct aprv2_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+/* hdr field Ver [0:3], Size [4:7], Message type [8:10] */
+#define APR_HDR_FIELD_VER(h) (h & 0x000F)
+#define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4)
+#define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4)
+#define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8)
+
+struct apr_hdr {
+ uint16_t hdr_field;
+ uint16_t pkt_size;
+ uint8_t src_svc;
+ uint8_t src_domain;
+ uint16_t src_port;
+ uint8_t dest_svc;
+ uint8_t dest_domain;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+} __packed;
+
+struct apr_pkt {
+ struct apr_hdr hdr;
+ uint8_t payload[];
+};
+
+struct apr_resp_pkt {
+ struct apr_hdr hdr;
+ void *payload;
+ int payload_size;
+};
+
+/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
+#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
+#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+
+struct apr_device {
+ struct device dev;
+ uint16_t svc_id;
+ uint16_t domain_id;
+ uint32_t version;
+ char name[APR_NAME_SIZE];
+ spinlock_t lock;
+ struct list_head node;
+};
+
+#define to_apr_device(d) container_of(d, struct apr_device, dev)
+
+struct apr_driver {
+ int (*probe)(struct apr_device *sl);
+ int (*remove)(struct apr_device *sl);
+ int (*callback)(struct apr_device *a,
+ struct apr_resp_pkt *d);
+ struct device_driver driver;
+ const struct apr_device_id *id_table;
+};
+
+#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE)
+
+int __apr_driver_register(struct apr_driver *drv, struct module *owner);
+void apr_driver_unregister(struct apr_driver *drv);
+
+/**
+ * module_apr_driver() - Helper macro for registering an aprbus driver
+ * @__apr_driver: apr_driver struct
+ *
+ * Helper macro for aprbus drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit()
+ */
+#define module_apr_driver(__apr_driver) \
+ module_driver(__apr_driver, apr_driver_register, \
+ apr_driver_unregister)
+
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+
+#endif /* __QCOM_APR_H_ */
diff --git a/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/preimage b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/preimage
new file mode 100644
index 0000000..a4d3e92
--- /dev/null
+++ b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/preimage
@@ -0,0 +1,166 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+>>>>>>>
+
+#ifndef __QCOM_APR_H_
+#define __QCOM_APR_H_
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+<<<<<<<
+=======
+extern struct bus_type aprbus;
+
+>>>>>>>
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+
+/*
+ * HEADER field
+ * version:0:3
+ * header_size : 4:7
+ * message_type : 8:9
+ * reserved: 10:15
+ */
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+#define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(APR_HDR_SIZE), \
+ APR_PKT_VER)
+/* Version */
+#define APR_PKT_VER 0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT 0x0
+#define APR_MSG_TYPE_CMD_RSP 0x1
+#define APR_MSG_TYPE_SEQ_CMD 0x2
+#define APR_MSG_TYPE_NSEQ_CMD 0x3
+#define APR_MSG_TYPE_MAX 0x04
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED 0x000100BE
+
+struct aprv2_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+/* hdr field Ver [0:3], Size [4:7], Message type [8:10] */
+#define APR_HDR_FIELD_VER(h) (h & 0x000F)
+#define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4)
+#define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4)
+#define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8)
+
+struct apr_hdr {
+ uint16_t hdr_field;
+ uint16_t pkt_size;
+ uint8_t src_svc;
+ uint8_t src_domain;
+ uint16_t src_port;
+ uint8_t dest_svc;
+ uint8_t dest_domain;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+<<<<<<<
+} __packed;
+
+struct apr_pkt {
+ struct apr_hdr hdr;
+ uint8_t payload[];
+};
+
+struct apr_resp_pkt {
+ struct apr_hdr hdr;
+ void *payload;
+ int payload_size;
+=======
+};
+
+struct apr_client_message {
+ uint16_t payload_size;
+ uint16_t hdr_len;
+ uint16_t msg_type;
+ uint16_t src;
+ uint16_t dest_svc;
+ uint16_t src_port;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+ void *payload;
+>>>>>>>
+};
+
+/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
+#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
+#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+
+struct apr_device {
+ struct device dev;
+ uint16_t svc_id;
+ uint16_t domain_id;
+ uint32_t version;
+<<<<<<<
+=======
+ char name[APR_NAME_SIZE];
+>>>>>>>
+ spinlock_t lock;
+ struct list_head node;
+};
+
+#define to_apr_device(d) container_of(d, struct apr_device, dev)
+
+struct apr_driver {
+ int (*probe)(struct apr_device *sl);
+ int (*remove)(struct apr_device *sl);
+ int (*callback)(struct apr_device *a,
+<<<<<<<
+ struct apr_client_message *d);
+=======
+ struct apr_resp_pkt *d);
+>>>>>>>
+ struct device_driver driver;
+ const struct apr_device_id *id_table;
+};
+
+#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE)
+
+int __apr_driver_register(struct apr_driver *drv, struct module *owner);
+void apr_driver_unregister(struct apr_driver *drv);
+
+/**
+ * module_apr_driver() - Helper macro for registering an aprbus driver
+ * @__apr_driver: apr_driver struct
+ *
+ * Helper macro for aprbus drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit()
+ */
+#define module_apr_driver(__apr_driver) \
+ module_driver(__apr_driver, apr_driver_register, \
+ apr_driver_unregister)
+
+<<<<<<<
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+=======
+int apr_send_pkt(struct apr_device *adev, void *buf);
+>>>>>>>
+
+#endif /* __QCOM_APR_H_ */
diff --git a/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/thisimage b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/thisimage
new file mode 100644
index 0000000..a4d3e92
--- /dev/null
+++ b/rr-cache/ef2ee746d553cafd4753c40a810c15f4701af2dc/thisimage
@@ -0,0 +1,166 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ * Copyright (c) 2018, Linaro Limited
+ */
+>>>>>>>
+
+#ifndef __QCOM_APR_H_
+#define __QCOM_APR_H_
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <dt-bindings/soc/qcom,apr.h>
+
+<<<<<<<
+=======
+extern struct bus_type aprbus;
+
+>>>>>>>
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+
+/*
+ * HEADER field
+ * version:0:3
+ * header_size : 4:7
+ * message_type : 8:9
+ * reserved: 10:15
+ */
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+#define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(APR_HDR_SIZE), \
+ APR_PKT_VER)
+/* Version */
+#define APR_PKT_VER 0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT 0x0
+#define APR_MSG_TYPE_CMD_RSP 0x1
+#define APR_MSG_TYPE_SEQ_CMD 0x2
+#define APR_MSG_TYPE_NSEQ_CMD 0x3
+#define APR_MSG_TYPE_MAX 0x04
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED 0x000100BE
+
+struct aprv2_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+/* hdr field Ver [0:3], Size [4:7], Message type [8:10] */
+#define APR_HDR_FIELD_VER(h) (h & 0x000F)
+#define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4)
+#define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4)
+#define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8)
+
+struct apr_hdr {
+ uint16_t hdr_field;
+ uint16_t pkt_size;
+ uint8_t src_svc;
+ uint8_t src_domain;
+ uint16_t src_port;
+ uint8_t dest_svc;
+ uint8_t dest_domain;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+<<<<<<<
+} __packed;
+
+struct apr_pkt {
+ struct apr_hdr hdr;
+ uint8_t payload[];
+};
+
+struct apr_resp_pkt {
+ struct apr_hdr hdr;
+ void *payload;
+ int payload_size;
+=======
+};
+
+struct apr_client_message {
+ uint16_t payload_size;
+ uint16_t hdr_len;
+ uint16_t msg_type;
+ uint16_t src;
+ uint16_t dest_svc;
+ uint16_t src_port;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+ void *payload;
+>>>>>>>
+};
+
+/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
+#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
+#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+
+struct apr_device {
+ struct device dev;
+ uint16_t svc_id;
+ uint16_t domain_id;
+ uint32_t version;
+<<<<<<<
+=======
+ char name[APR_NAME_SIZE];
+>>>>>>>
+ spinlock_t lock;
+ struct list_head node;
+};
+
+#define to_apr_device(d) container_of(d, struct apr_device, dev)
+
+struct apr_driver {
+ int (*probe)(struct apr_device *sl);
+ int (*remove)(struct apr_device *sl);
+ int (*callback)(struct apr_device *a,
+<<<<<<<
+ struct apr_client_message *d);
+=======
+ struct apr_resp_pkt *d);
+>>>>>>>
+ struct device_driver driver;
+ const struct apr_device_id *id_table;
+};
+
+#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE)
+
+int __apr_driver_register(struct apr_driver *drv, struct module *owner);
+void apr_driver_unregister(struct apr_driver *drv);
+
+/**
+ * module_apr_driver() - Helper macro for registering an aprbus driver
+ * @__apr_driver: apr_driver struct
+ *
+ * Helper macro for aprbus drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit()
+ */
+#define module_apr_driver(__apr_driver) \
+ module_driver(__apr_driver, apr_driver_register, \
+ apr_driver_unregister)
+
+<<<<<<<
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+=======
+int apr_send_pkt(struct apr_device *adev, void *buf);
+>>>>>>>
+
+#endif /* __QCOM_APR_H_ */
diff --git a/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/postimage b/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/postimage
new file mode 100644
index 0000000..cc77513
--- /dev/null
+++ b/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/postimage
@@ -0,0 +1,744 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=y
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/preimage b/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/preimage
new file mode 100644
index 0000000..6bd883c
--- /dev/null
+++ b/rr-cache/f42e7086d16b7cb3e0f87b3759f54abcba5f2201/preimage
@@ -0,0 +1,747 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=y
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_IPQ806X=m
+CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_ROCKCHIP=m
+CONFIG_DWMAC_SUNXI=m
+CONFIG_DWMAC_SUN8I=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=y
+CONFIG_ATH10K_PCI=y
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=11
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+<<<<<<<
+CONFIG_PINCTRL_MT7622=y
+=======
+CONFIG_PINCTRL_SDM845=y
+>>>>>>>
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_CROS_EC_CHARDEV=m
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_VC4=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_QDSP6=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_ULPI_BUS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_HI6220_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
+CONFIG_QCOM_APR=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_MEMORY=y
+CONFIG_TEGRA_MC=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_TEGRA=m
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_MESON_EFUSE=m
+CONFIG_QCOM_QFPROM=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_CRC32_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/rr-cache/f48c6d7c01caa8f8b83cdf196c25aa3780acdc0d/preimage b/rr-cache/f48c6d7c01caa8f8b83cdf196c25aa3780acdc0d/preimage
new file mode 100644
index 0000000..bccd77c
--- /dev/null
+++ b/rr-cache/f48c6d7c01caa8f8b83cdf196c25aa3780acdc0d/preimage
@@ -0,0 +1,258 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
+config QCOM_RPMCC
+ bool
+
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config QCOM_A53PLL
+ tristate "MSM8916 A53 PLL"
+ depends on COMMON_CLK_QCOM
+ default ARCH_QCOM
+ help
+ Support for the A53 PLL on MSM8916 devices. It provides
+ the CPU with frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on MSM8916
+ devices.
+
+config QCOM_CLK_APCS_MSM8916
+ tristate "MSM8916 APCS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ default ARCH_QCOM
+ help
+	  Support for the APCS Clock Controller on msm8916 devices. The
+	  APCS manages the mux and divider which feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as msm8916.
+
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
+config APQ_GCC_8084
+ tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8084 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+ tristate "APQ8084 Multimedia Clock Controller"
+ select APQ_GCC_8084
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on apq8084 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_GCC_806X
+ tristate "IPQ806x Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq806x devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config IPQ_LCC_806X
+ tristate "IPQ806x LPASS Clock Controller"
+ select IPQ_GCC_806X
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on ipq806x devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ S/PDIF, etc.
+
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI i2c, USB,
+ SD/eMMC, display, graphics, camera etc.
+
+config MSM_GCC_8960
+ tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_LCC_8960
+ tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on apq8064/msm8960 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+<<<<<<<
+=======
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
+>>>>>>>
+config SPMI_PMIC_CLKDIV
+ tristate "SPMI PMIC clkdiv Support"
+ depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ help
+ This driver supports the clkdiv functionality on the Qualcomm
+ Technologies, Inc. SPMI PMIC. It configures the frequency of
+ clkdiv outputs of the PMIC. These clocks are typically wired
+ through alternate functions on GPIO pins.
diff --git a/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/postimage b/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/postimage
new file mode 100644
index 0000000..08b15d9
--- /dev/null
+++ b/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/postimage
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+ serial@75b1000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+ spi@75ba000{
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ };
+ };
+ };
+
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/preimage b/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/preimage
new file mode 100644
index 0000000..cb01c7d
--- /dev/null
+++ b/rr-cache/f6d7ddf40c8291fd785d55ded2d5dd0b8572b153/preimage
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ serial2 = &blsp1_uart1;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ soc {
+ serial@7570000 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&divclk4>;
+ };
+ };
+
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+ serial@75b1000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ };
+
+ i2c@7577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@75b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@7575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@75b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+ spi@75ba000{
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+ };
+
+ phy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ phy@34000 {
+ status = "okay";
+ };
+
+ phy@7410000 {
+ status = "okay";
+ };
+
+ phy@7411000 {
+ status = "okay";
+ };
+
+ phy@7412000 {
+ status = "okay";
+ };
+
+ usb@6a00000 {
+ status = "okay";
+
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ usb@7600000 {
+ status = "okay";
+
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+ };
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ agnoc@0 {
+ pcie@600000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ };
+
+ pcie@608000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ };
+
+ pcie@610000 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ };
+ };
+<<<<<<<
+=======
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ };
+ };
+>>>>>>>
+ };
+
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_up_gpio>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ rpm-glink {
+ rpm_requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /**
+ * 1.8v required on LS expansion
+ * for mezzanine boards
+ */
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+};
diff --git a/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/postimage b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/postimage
new file mode 100644
index 0000000..9f5fb57
--- /dev/null
+++ b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/postimage
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __Q6_ASM_H__
+#define __Q6_ASM_H__
+#include "q6dsp-common.h"
+#include <dt-bindings/sound/qcom,q6asm.h>
+
+/* ASM client callback events */
+#define CMD_PAUSE 0x0001
+#define ASM_CLIENT_EVENT_CMD_PAUSE_DONE 0x1001
+#define CMD_FLUSH 0x0002
+#define ASM_CLIENT_EVENT_CMD_FLUSH_DONE 0x1002
+#define CMD_EOS 0x0003
+#define ASM_CLIENT_EVENT_CMD_EOS_DONE 0x1003
+#define CMD_CLOSE 0x0004
+#define ASM_CLIENT_EVENT_CMD_CLOSE_DONE 0x1004
+#define CMD_OUT_FLUSH 0x0005
+#define ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE 0x1005
+#define CMD_SUSPEND 0x0006
+#define ASM_CLIENT_EVENT_CMD_SUSPEND_DONE 0x1006
+#define ASM_CLIENT_EVENT_CMD_RUN_DONE 0x1008
+#define ASM_CLIENT_EVENT_DATA_WRITE_DONE 0x1009
+#define ASM_CLIENT_EVENT_DATA_READ_DONE 0x100a
+
+enum {
+ LEGACY_PCM_MODE = 0,
+ LOW_LATENCY_PCM_MODE,
+ ULTRA_LOW_LATENCY_PCM_MODE,
+ ULL_POST_PROCESSING_PCM_MODE,
+};
+
+#define MAX_SESSIONS 8
+#define NO_TIMESTAMP 0xFF00
+#define FORMAT_LINEAR_PCM 0x0000
+
+typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
+ void *payload, void *priv);
+struct audio_client;
+struct audio_client *q6asm_audio_client_alloc(struct device *dev,
+ q6asm_cb cb, void *priv,
+ int session_id, int perf_mode);
+void q6asm_audio_client_free(struct audio_client *ac);
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample);
+int q6asm_read(struct audio_client *ac);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+ uint16_t bits_per_sample);
+int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_cmd(struct audio_client *ac, int cmd);
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+int q6asm_get_session_id(struct audio_client *ac);
+int q6asm_map_memory_regions(unsigned int dir,
+ struct audio_client *ac,
+ phys_addr_t phys,
+ size_t bufsz, unsigned int bufcnt);
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac);
+#endif /* __Q6_ASM_H__ */
diff --git a/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/preimage b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/preimage
new file mode 100644
index 0000000..c77ec6f
--- /dev/null
+++ b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/preimage
@@ -0,0 +1,93 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef __Q6_ASM_H__
+#define __Q6_ASM_H__
+#include "q6dsp-common.h"
+#include <dt-bindings/sound/qcom,q6asm.h>
+
+/* ASM client callback events */
+#define CMD_PAUSE 0x0001
+#define ASM_CLIENT_EVENT_CMD_PAUSE_DONE 0x1001
+#define CMD_FLUSH 0x0002
+#define ASM_CLIENT_EVENT_CMD_FLUSH_DONE 0x1002
+#define CMD_EOS 0x0003
+#define ASM_CLIENT_EVENT_CMD_EOS_DONE 0x1003
+#define CMD_CLOSE 0x0004
+#define ASM_CLIENT_EVENT_CMD_CLOSE_DONE 0x1004
+#define CMD_OUT_FLUSH 0x0005
+#define ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE 0x1005
+#define CMD_SUSPEND 0x0006
+#define ASM_CLIENT_EVENT_CMD_SUSPEND_DONE 0x1006
+#define ASM_CLIENT_EVENT_CMD_RUN_DONE 0x1008
+#define ASM_CLIENT_EVENT_DATA_WRITE_DONE 0x1009
+#define ASM_CLIENT_EVENT_DATA_READ_DONE 0x100a
+
+enum {
+ LEGACY_PCM_MODE = 0,
+ LOW_LATENCY_PCM_MODE,
+ ULTRA_LOW_LATENCY_PCM_MODE,
+ ULL_POST_PROCESSING_PCM_MODE,
+};
+
+<<<<<<<
+#define MAX_SESSIONS 16
+#define NO_TIMESTAMP 0xFF00
+#define FORMAT_LINEAR_PCM 0x0000
+
+void q6asm_set_dai_data(struct device *dev, void *data);
+void *q6asm_get_dai_data(struct device *dev);
+int q6asm_dai_probe(struct device *dev);
+int q6asm_dai_remove(struct device *dev);
+
+=======
+#define MAX_SESSIONS 8
+#define NO_TIMESTAMP 0xFF00
+#define FORMAT_LINEAR_PCM 0x0000
+
+>>>>>>>
+typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
+ void *payload, void *priv);
+struct audio_client;
+struct audio_client *q6asm_audio_client_alloc(struct device *dev,
+ q6asm_cb cb, void *priv,
+<<<<<<<
+ int session_id);
+=======
+ int session_id, int perf_mode);
+>>>>>>>
+void q6asm_audio_client_free(struct audio_client *ac);
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample);
+int q6asm_read(struct audio_client *ac);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+<<<<<<<
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+=======
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+>>>>>>>
+ uint16_t bits_per_sample);
+int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_cmd(struct audio_client *ac, int cmd);
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+int q6asm_get_session_id(struct audio_client *ac);
+int q6asm_map_memory_regions(unsigned int dir,
+ struct audio_client *ac,
+ phys_addr_t phys,
+ size_t bufsz, unsigned int bufcnt);
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac);
+#endif /* __Q6_ASM_H__ */
diff --git a/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/thisimage b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/thisimage
new file mode 100644
index 0000000..c77ec6f
--- /dev/null
+++ b/rr-cache/f8a2c05b7200af21e9bd94cbb246b77e7384a142/thisimage
@@ -0,0 +1,93 @@
+<<<<<<<
+/* SPDX-License-Identifier: GPL-2.0 */
+=======
+// SPDX-License-Identifier: GPL-2.0
+>>>>>>>
+#ifndef __Q6_ASM_H__
+#define __Q6_ASM_H__
+#include "q6dsp-common.h"
+#include <dt-bindings/sound/qcom,q6asm.h>
+
+/* ASM client callback events */
+#define CMD_PAUSE 0x0001
+#define ASM_CLIENT_EVENT_CMD_PAUSE_DONE 0x1001
+#define CMD_FLUSH 0x0002
+#define ASM_CLIENT_EVENT_CMD_FLUSH_DONE 0x1002
+#define CMD_EOS 0x0003
+#define ASM_CLIENT_EVENT_CMD_EOS_DONE 0x1003
+#define CMD_CLOSE 0x0004
+#define ASM_CLIENT_EVENT_CMD_CLOSE_DONE 0x1004
+#define CMD_OUT_FLUSH 0x0005
+#define ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE 0x1005
+#define CMD_SUSPEND 0x0006
+#define ASM_CLIENT_EVENT_CMD_SUSPEND_DONE 0x1006
+#define ASM_CLIENT_EVENT_CMD_RUN_DONE 0x1008
+#define ASM_CLIENT_EVENT_DATA_WRITE_DONE 0x1009
+#define ASM_CLIENT_EVENT_DATA_READ_DONE 0x100a
+
+enum {
+ LEGACY_PCM_MODE = 0,
+ LOW_LATENCY_PCM_MODE,
+ ULTRA_LOW_LATENCY_PCM_MODE,
+ ULL_POST_PROCESSING_PCM_MODE,
+};
+
+<<<<<<<
+#define MAX_SESSIONS 16
+#define NO_TIMESTAMP 0xFF00
+#define FORMAT_LINEAR_PCM 0x0000
+
+void q6asm_set_dai_data(struct device *dev, void *data);
+void *q6asm_get_dai_data(struct device *dev);
+int q6asm_dai_probe(struct device *dev);
+int q6asm_dai_remove(struct device *dev);
+
+=======
+#define MAX_SESSIONS 8
+#define NO_TIMESTAMP 0xFF00
+#define FORMAT_LINEAR_PCM 0x0000
+
+>>>>>>>
+typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
+ void *payload, void *priv);
+struct audio_client;
+struct audio_client *q6asm_audio_client_alloc(struct device *dev,
+ q6asm_cb cb, void *priv,
+<<<<<<<
+ int session_id);
+=======
+ int session_id, int perf_mode);
+>>>>>>>
+void q6asm_audio_client_free(struct audio_client *ac);
+int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+int q6asm_open_write(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+ uint32_t rate, uint32_t channels, uint16_t bits_per_sample);
+int q6asm_read(struct audio_client *ac);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+<<<<<<<
+ u8 channel_map[PCM_FORMAT_MAX_NUM_CHANNEL],
+=======
+ u8 channel_map[PCM_MAX_NUM_CHANNEL],
+>>>>>>>
+ uint16_t bits_per_sample);
+int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
+ uint32_t lsw_ts);
+int q6asm_cmd(struct audio_client *ac, int cmd);
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+int q6asm_get_session_id(struct audio_client *ac);
+int q6asm_map_memory_regions(unsigned int dir,
+ struct audio_client *ac,
+ phys_addr_t phys,
+ size_t bufsz, unsigned int bufcnt);
+int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac);
+#endif /* __Q6_ASM_H__ */