Message ID | 20180511075945.16548-3-hch@lst.de (mailing list archive) |
---|---|
State | Not Applicable |
Hi Christoph,

On Fri, 2018-05-11 at 09:59 +0200, Christoph Hellwig wrote:

[snip]

There seems to be one subtle issue with map/unmap code.
While investigating problems on ARC I added instrumentation as below:
---------------------------------------->8------------------------------------
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -152,14 +152,37 @@ static void _dma_cache_sync(struct device *dev, phys_addr_t paddr, size_t size,
        }
 }
 
+static const char *dir_to_str(enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_BIDIRECTIONAL: return "DMA_BIDIRECTIONAL";
+       case DMA_TO_DEVICE: return "DMA_TO_DEVICE";
+       case DMA_FROM_DEVICE: return "DMA_FROM_DEVICE";
+       case DMA_NONE: return "DMA_NONE";
+       default: return "WRONG_VALUE!";
+       }
+}
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
+       if (dir != DMA_TO_DEVICE){
+               dump_stack();
+               printk(" *** %s@%d: DMA direction is %s instead of %s\n",
+                      __func__, __LINE__, dir_to_str(dir), dir_to_str(DMA_TO_DEVICE));
+       }
+
        return _dma_cache_sync(dev, paddr, size, dir);
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
+       if (dir != DMA_FROM_DEVICE) {
+               dump_stack();
+               printk(" *** %s@%d: DMA direction is %s instead of %s\n",
+                      __func__, __LINE__, dir_to_str(dir), dir_to_str(DMA_FROM_DEVICE));
+       }
+
        return _dma_cache_sync(dev, paddr, size, dir);
 }
---------------------------------------->8------------------------------------

And with that I noticed a bit unexpected output, see below:
---------------------------------------->8------------------------------------
Stack Trace:
  arc_unwind_core.constprop.1+0xd4/0xf8
  dump_stack+0x68/0x80
  arch_sync_dma_for_device+0x34/0xc4
  dma_noncoherent_map_sg+0x80/0x94
  __dw_mci_start_request+0x1ee/0x868
  dw_mci_request+0x17e/0x1c8
  mmc_wait_for_req+0x106/0x1ac
  mmc_app_sd_status+0x108/0x130
  mmc_sd_setup_card+0xc6/0x2e8
  mmc_attach_sd+0x1b6/0x394
  mmc_rescan+0x2f4/0x3bc
  process_one_work+0x194/0x348
  worker_thread+0xf2/0x478
  kthread+0x120/0x13c
  ret_from_fork+0x18/0x1c
 *** arch_sync_dma_for_device@172: DMA direction is DMA_FROM_DEVICE instead of DMA_TO_DEVICE
...
Stack Trace:
  arc_unwind_core.constprop.1+0xd4/0xf8
  dump_stack+0x68/0x80
  arch_sync_dma_for_device+0x34/0xc4
  dma_noncoherent_map_page+0x86/0x8c
  usb_hcd_map_urb_for_dma+0x49e/0x53c
  usb_hcd_submit_urb+0x43c/0x8c4
  usb_control_msg+0xbe/0x16c
  hub_port_init+0x5e0/0xb0c
  hub_event+0x4e6/0x1164
  process_one_work+0x194/0x348
  worker_thread+0xf2/0x478
  kthread+0x120/0x13c
  ret_from_fork+0x18/0x1c
 mmcblk0: p1 p2
 *** arch_sync_dma_for_device@172: DMA direction is DMA_FROM_DEVICE instead of DMA_TO_DEVICE

...
and quite some more of the similar
...
---------------------------------------->8------------------------------------

In case of MMC/DW_MCI (AKA DesignWare MobileStorage controller) that's an execution flow:
1) __dw_mci_start_request()
2) dw_mci_pre_dma_transfer()
3) dma_map_sg(..., mmc_get_dma_dir(data))

Note mmc_get_dma_dir() is just "data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE".
I.e. if we're preparing for sending data dma_noncoherent_map_sg() will have DMA_TO_DEVICE which
is quite OK for passing to dma_noncoherent_sync_sg_for_device() but in case of reading we'll have
DMA_FROM_DEVICE which we'll pass to dma_noncoherent_sync_sg_for_device() in dma_noncoherent_map_sg().

I'd say this is not entirely correct because IMHO arch_sync_dma_for_cpu() is supposed to only be used
in case of DMA_FROM_DEVICE and arch_sync_dma_for_device() only in case of DMA_TO_DEVICE.


> +static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
> +		unsigned long offset, size_t size, enum dma_data_direction dir,
> +		unsigned long attrs)
> +{
> +	dma_addr_t addr;
> +
> +	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
> +	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		arch_sync_dma_for_device(dev, page_to_phys(page), size, dir);
> +	return addr;
> +}
> +
> +static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
> +		int nents, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
> +	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
> +	return nents;
> +}

The same is for unmap functions.
My guess is we need to respect direction in map/unmap functions and use
either dma_noncoherent_sync_single_for_cpu(..., DMA_FROM_DEVICE) or
dma_noncoherent_sync_single_for_device(..., DMA_TO_DEVICE).


> +static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
> +		size_t size, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
> +}
> +
> +static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
> +		int nents, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
> +}
> +#endif

But the real fix of my problem is:
---------------------------------------->8------------------------------------
--- a/lib/dma-noncoherent.c
+++ b/lib/dma-noncoherent.c
@@ -35,7 +35,7 @@ static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page
 
        addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, page_to_phys(page), size, dir);
+               arch_sync_dma_for_device(dev, page_to_phys(page) + offset, size, dir);
        return addr;
 }
---------------------------------------->8------------------------------------

You seem to have lost an offset in the page so if we happen to have a buffer not aligned to
a page boundary then we were obviously corrupting data outside our data :)

-Alexey
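To see concretely why the lost offset corrupts unrelated memory, here is a minimal sketch (illustrative only: the device and page are placeholders, and 0x800 is an arbitrary sub-page offset):

#include <linux/mm.h>
#include <linux/dma-noncoherent.h>

/*
 * Illustrative sketch of the bug described above: a 64-byte buffer
 * living 0x800 bytes into a page.
 */
static void offset_bug_sketch(struct device *dev, struct page *page)
{
	unsigned long offset = 0x800;	/* buffer is not page-aligned */
	size_t size = 64;

	/*
	 * Buggy: operates on the first 64 bytes of the page, i.e. on
	 * whatever unrelated data lives there, while the cache lines of
	 * the buffer actually being mapped (page start + 0x800) are left
	 * untouched.
	 */
	arch_sync_dma_for_device(dev, page_to_phys(page), size, DMA_TO_DEVICE);

	/* Fixed: the cache maintenance hits the mapped buffer itself. */
	arch_sync_dma_for_device(dev, page_to_phys(page) + offset, size,
				 DMA_TO_DEVICE);
}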
On Fri, May 18, 2018 at 01:03:46PM +0000, Alexey Brodkin wrote:
> Note mmc_get_dma_dir() is just "data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE".
> I.e. if we're preparing for sending data dma_noncoherent_map_sg() will have DMA_TO_DEVICE which
> is quite OK for passing to dma_noncoherent_sync_sg_for_device() but in case of reading we'll have
> DMA_FROM_DEVICE which we'll pass to dma_noncoherent_sync_sg_for_device() in dma_noncoherent_map_sg().
> 
> I'd say this is not entirely correct because IMHO arch_sync_dma_for_cpu() is supposed to only be used
> in case of DMA_FROM_DEVICE and arch_sync_dma_for_device() only in case of DMA_TO_DEVICE.

arc overrides the dir parameter of the dma_sync_single_for_device/
dma_sync_single_for_cpu calls.  My patches dropped that, and I have
restored that, and an audit for the other architectures is pending.

That being said, the existing arc code still looks rather odd as it
didn't do the same thing for the scatterlist versions of the calls.
I've thrown in a few patches into my new tree to make the sg versions
make the normal calls, and to clean up the area a bit.

> You seem to have lost an offset in the page so if we happen to have a buffer not aligned to
> a page boundary then we were obviously corrupting data outside our data :)

Oops!  Thank you for all the debugging!
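For readers following along, "overriding the dir parameter" means the arch hooks ignore the direction they are handed and always perform the operation matching the ownership transfer. A simplified sketch of that behaviour (not the verbatim arch/arc code; dma_cache_wback()/dma_cache_inv() stand in for the architecture's cache primitives):

#include <linux/dma-noncoherent.h>

void dma_cache_wback(phys_addr_t paddr, size_t size);	/* assumed primitive */
void dma_cache_inv(phys_addr_t paddr, size_t size);	/* assumed primitive */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* Ownership moves to the device: always push dirty lines out. */
	dma_cache_wback(paddr, size);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* Ownership returns to the CPU: always drop possibly-stale lines. */
	dma_cache_inv(paddr, size);
}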
Hi Christoph,

On Fri, 2018-05-18 at 15:27 +0200, hch@lst.de wrote:
> On Fri, May 18, 2018 at 01:03:46PM +0000, Alexey Brodkin wrote:
> > Note mmc_get_dma_dir() is just "data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE".
> > I.e. if we're preparing for sending data dma_noncoherent_map_sg() will have DMA_TO_DEVICE which
> > is quite OK for passing to dma_noncoherent_sync_sg_for_device() but in case of reading we'll have
> > DMA_FROM_DEVICE which we'll pass to dma_noncoherent_sync_sg_for_device() in dma_noncoherent_map_sg().
> > 
> > I'd say this is not entirely correct because IMHO arch_sync_dma_for_cpu() is supposed to only be used
> > in case of DMA_FROM_DEVICE and arch_sync_dma_for_device() only in case of DMA_TO_DEVICE.
> 
> arc overrides the dir parameter of the dma_sync_single_for_device/
> dma_sync_single_for_cpu calls.  My patches dropped that, and I have
> restored that, and an audit for the other architectures is pending.

Well, at least for me it's confusing that we pass a direction to a
function whose purpose is already known. I'd say that
XXX_sync_for_device() doesn't need a _variable_ direction as an
argument - otherwise, what would it mean to pass DMA_FROM_DEVICE to
that function?

> That being said, the existing arc code still looks rather odd as it
> didn't do the same thing for the scatterlist versions of the calls.

That might easily be the case, so it's good we caught it now and it
will be fixed :)

> I've thrown in a few patches into my new tree to make the sg versions
> make the normal calls, and to clean up the area a bit.

I'll try your newer series now, thanks!

-Alexey
On 05/18/2018 06:11 AM, Alexey Brodkin wrote:
>  void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
>                 size_t size, enum dma_data_direction dir)
>  {
> +       if (dir != DMA_TO_DEVICE){
> +               dump_stack();
> +               printk(" *** %s@%d: DMA direction is %s instead of %s\n",
> +                      __func__, __LINE__, dir_to_str(dir), dir_to_str(DMA_TO_DEVICE));
> +       }
> +
>         return _dma_cache_sync(dev, paddr, size, dir);
>  }
> 
>  void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
>                 size_t size, enum dma_data_direction dir)
>  {
> +       if (dir != DMA_FROM_DEVICE) {
> +               dump_stack();
> +               printk(" *** %s@%d: DMA direction is %s instead of %s\n",
> +                      __func__, __LINE__, dir_to_str(dir), dir_to_str(DMA_FROM_DEVICE));
> +       }
> +
>         return _dma_cache_sync(dev, paddr, size, dir);
>  }

...

> In case of MMC/DW_MCI (AKA DesignWare MobileStorage controller) that's an execution flow:
> 1) __dw_mci_start_request()
> 2) dw_mci_pre_dma_transfer()
> 3) dma_map_sg(..., mmc_get_dma_dir(data))
> 
> Note mmc_get_dma_dir() is just "data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE".
> I.e. if we're preparing for sending data dma_noncoherent_map_sg() will have DMA_TO_DEVICE which
> is quite OK for passing to dma_noncoherent_sync_sg_for_device() but in case of reading we'll have
> DMA_FROM_DEVICE which we'll pass to dma_noncoherent_sync_sg_for_device() in dma_noncoherent_map_sg().
> 
> I'd say this is not entirely correct because IMHO arch_sync_dma_for_cpu() is supposed to only be used
> in case of DMA_FROM_DEVICE and arch_sync_dma_for_device() only in case of DMA_TO_DEVICE.

So roughly 10 years ago, some kernel rookie named Vineet Gupta asked the
exact same question :-)

http://kernelnewbies.kernelnewbies.narkive.com/aGW1QcDv/query-about-dma-sync-for-cpu-and-direction-to-device

I never understood the need for this direction. And if memory serves me
right, at that time I was seeing twice the amount of cache flushing !

> But the real fix of my problem is:
> ---------------------------------------->8------------------------------------
> --- a/lib/dma-noncoherent.c
> +++ b/lib/dma-noncoherent.c
> @@ -35,7 +35,7 @@ static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page
> 
>         addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
>         if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               arch_sync_dma_for_device(dev, page_to_phys(page), size, dir);
> +               arch_sync_dma_for_device(dev, page_to_phys(page) + offset, size, dir);
>         return addr;
> }
> ---------------------------------------->8------------------------------------
> 
> You seem to have lost an offset in the page so if we happen to have a buffer not aligned to
> a page boundary then we were obviously corrupting data outside our data :)

Neat !
On 05/18/2018 06:23 AM, hch@lst.de wrote:
> On Fri, May 18, 2018 at 01:03:46PM +0000, Alexey Brodkin wrote:
>> Note mmc_get_dma_dir() is just "data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE".
>> I.e. if we're preparing for sending data dma_noncoherent_map_sg() will have DMA_TO_DEVICE which
>> is quite OK for passing to dma_noncoherent_sync_sg_for_device() but in case of reading we'll have
>> DMA_FROM_DEVICE which we'll pass to dma_noncoherent_sync_sg_for_device() in dma_noncoherent_map_sg().
>>
>> I'd say this is not entirely correct because IMHO arch_sync_dma_for_cpu() is supposed to only be used
>> in case of DMA_FROM_DEVICE and arch_sync_dma_for_device() only in case of DMA_TO_DEVICE.
> arc overrides the dir parameter of the dma_sync_single_for_device/
> dma_sync_single_for_cpu calls.  My patches dropped that, and I have
> restored that, and an audit for the other architectures is pending.

Right, for now let's retain that and do a sweeping audit of @direction - to me
it seems extraneous (as it did 10 years ago), but I'm not an expert in this
area, so perhaps it is needed for some devices / arches and it would be good
to understand that finally.

> That being said, the existing arc code still looks rather odd as it
> didn't do the same thing for the scatterlist versions of the calls.
> I've thrown in a few patches into my new tree to make the sg versions
> make the normal calls, and to clean up the area a bit.

Not calling names or anything here, but it doesn't exist for sg variants,
because I didn't write that code :-)
It was introduced by your commit:

2016-01-20 052c96dbe33b arc: convert to dma_map_ops
On Fri, May 18, 2018 at 10:20:02AM -0700, Vineet Gupta wrote:
> I never understood the need for this direction. And if memory serves me
> right, at that time I was seeing twice the amount of cache flushing !

It's necessary.  Take a moment to think carefully about this:

	dma_map_single(, dir)

	dma_sync_single_for_cpu(, dir)

	dma_sync_single_for_device(, dir)

	dma_unmap_single(, dir)

In the case of a DMA-incoherent architecture, the operations done at each
stage depend on the direction argument:

        map             for_cpu         for_device      unmap
TO_DEV  writeback       none            writeback       none
TO_CPU  invalidate      invalidate*     invalidate      invalidate*
BIDIR   writeback       invalidate      writeback       invalidate

* - only necessary if the CPU speculatively prefetches.

The multiple invalidations for the TO_CPU case handle different
conditions that can result in data corruption, and for some CPUs, all
four are necessary.

This is what is implemented for 32-bit ARM, depending on the CPU
capabilities, as we have DMA incoherent devices and we have CPUs that
speculatively prefetch data, and so may load data into the caches while
DMA is in operation.


Things get more interesting if the implementation behind the DMA API has
to copy data between the buffer supplied to the mapping and some DMA
accessible buffer:

        map             for_cpu         for_device      unmap
TO_DEV  copy to dma     none            copy to dma     none
TO_CPU  none            copy to cpu     none            copy to cpu
BIDIR   copy to dma     copy to cpu     copy to dma     copy to cpu

So, in both cases, the value of the direction argument defines what you
need to do in each call.
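Russell's first table translates into code along these lines - a generic sketch, where cache_wb() and cache_inv() are assumed per-arch writeback/invalidate primitives rather than real kernel API:

#include <linux/dma-mapping.h>

void cache_wb(phys_addr_t paddr, size_t size);	/* assumed primitive */
void cache_inv(phys_addr_t paddr, size_t size);	/* assumed primitive */

/* Used on both "map" and "for_device" (first table above). */
static void sync_for_device(phys_addr_t paddr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_wb(paddr, size);		/* writeback */
		break;
	case DMA_FROM_DEVICE:
		cache_inv(paddr, size);		/* invalidate */
		break;
	default:
		break;
	}
}

/* Used on both "for_cpu" and "unmap" (first table above). */
static void sync_for_cpu(phys_addr_t paddr, size_t size,
			 enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:			/* none */
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* invalidate; only strictly needed when the CPU
		 * speculatively prefetches */
		cache_inv(paddr, size);
		break;
	default:
		break;
	}
}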
Hi Russel,

On Fri, 2018-05-18 at 18:50 +0100, Russell King - ARM Linux wrote:
> It's necessary.  Take a moment to think carefully about this:
> 
>         dma_map_single(, dir)
> 
>         dma_sync_single_for_cpu(, dir)
> 
>         dma_sync_single_for_device(, dir)
> 
>         dma_unmap_single(, dir)
> 
> In the case of a DMA-incoherent architecture, the operations done at each
> stage depend on the direction argument:
> 
>         map             for_cpu         for_device      unmap
> TO_DEV  writeback       none            writeback       none
> TO_CPU  invalidate      invalidate*     invalidate      invalidate*
> BIDIR   writeback       invalidate      writeback       invalidate
> 
> * - only necessary if the CPU speculatively prefetches.

I think invalidation of DMA buffer is required on for_cpu(TO_CPU) even
if CPU doesn't prefetch - what if we reuse the same buffer for multiple
reads from DMA device?

> The multiple invalidations for the TO_CPU case handle different
> conditions that can result in data corruption, and for some CPUs, all
> four are necessary.

I would agree that map()/unmap() are quite special cases and so,
depending on direction, we need to execute in them either the for_cpu()
or for_device() call-backs.

As for invalidation in case of for_device(TO_CPU) I still don't see
a rationale behind it. Would be interesting to see a real example where
we benefit from this.

> This is what is implemented for 32-bit ARM, depending on the CPU
> capabilities, as we have DMA incoherent devices and we have CPUs that
> speculatively prefetch data, and so may load data into the caches while
> DMA is in operation.
> 
> 
> Things get more interesting if the implementation behind the DMA API has
> to copy data between the buffer supplied to the mapping and some DMA
> accessible buffer:
> 
>         map             for_cpu         for_device      unmap
> TO_DEV  copy to dma     none            copy to dma     none
> TO_CPU  none            copy to cpu     none            copy to cpu
> BIDIR   copy to dma     copy to cpu     copy to dma     copy to cpu
> 
> So, in both cases, the value of the direction argument defines what you
> need to do in each call.

Interestingly enough, in your second table (which describes the more
complicated case indeed) you set "none" for for_device(TO_CPU), which
looks logical to me.

So IMHO this is what makes sense:
---------------------------->8-----------------------------
        map             for_cpu         for_device      unmap
TO_DEV  writeback       none            writeback       none
TO_CPU  none            invalidate      none            invalidate*
BIDIR   writeback       invalidate      writeback       invalidate*
---------------------------->8-----------------------------

* is the case for prefetching CPU.

-Alexey
On 18.05.2018 15:03, Alexey Brodkin wrote:
> But the real fix of my problem is:
> ---------------------------------------->8------------------------------------
> --- a/lib/dma-noncoherent.c
> +++ b/lib/dma-noncoherent.c
> @@ -35,7 +35,7 @@ static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page
> 
>         addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
>         if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               arch_sync_dma_for_device(dev, page_to_phys(page), size, dir);
> +               arch_sync_dma_for_device(dev, page_to_phys(page) + offset, size, dir);
>         return addr;
> }
> ---------------------------------------->8------------------------------------
> 
> You seem to have lost an offset in the page so if we happen to have a buffer not aligned to
> a page boundary then we were obviously corrupting data outside our data :)

Good.
This patch seems to fix the dma issues I faced on my 32bit B160L parisc box.

So it leaves only one open issue on parisc:
Now every 32 bit parisc system is unnecessarily non-coherent.

Helge
On 05/18/2018 10:50 AM, Russell King - ARM Linux wrote:
> On Fri, May 18, 2018 at 10:20:02AM -0700, Vineet Gupta wrote:
>> I never understood the need for this direction. And if memory serves me
>> right, at that time I was seeing twice the amount of cache flushing !
> It's necessary.  Take a moment to think carefully about this:
> 
> 	dma_map_single(, dir)
> 
> 	dma_sync_single_for_cpu(, dir)
> 
> 	dma_sync_single_for_device(, dir)
> 
> 	dma_unmap_single(, dir)

As an aside, do these imply a state machine of sorts - does a driver
need to always call map_single first?

My original point of contention/confusion is the specific combinations
of API and direction, specifically for_cpu(TO_DEV) and for_device(TO_CPU).

Semantically what does dma_sync_single_for_cpu(TO_DEV) even imply for a
non dma coherent arch?

Your tables below have "none" for both, implying it is unlikely to be a
real combination (for ARM and ARC at least).

The other case, actually @dir TO_CPU, independent of for_{cpu, device}
implies driver intends to touch it after the call, so it would invalidate
any stray lines, unconditionally (and not just for speculative prefetch
case).

> In the case of a DMA-incoherent architecture, the operations done at each
> stage depend on the direction argument:
> 
>         map             for_cpu         for_device      unmap
> TO_DEV  writeback       none            writeback       none
> TO_CPU  invalidate      invalidate*     invalidate      invalidate*
> BIDIR   writeback       invalidate      writeback       invalidate
> 
> * - only necessary if the CPU speculatively prefetches.
> 
> The multiple invalidations for the TO_CPU case handle different
> conditions that can result in data corruption, and for some CPUs, all
> four are necessary.

Can you please explain in some more detail, TO_CPU row, why invalidate is
conditional sometimes.
On Fri, May 18, 2018 at 07:57:34PM +0000, Alexey Brodkin wrote:
> Hi Russel,

That's Russell.

> On Fri, 2018-05-18 at 18:50 +0100, Russell King - ARM Linux wrote:
> > It's necessary.  Take a moment to think carefully about this:
> > 
> >         dma_map_single(, dir)
> > 
> >         dma_sync_single_for_cpu(, dir)
> > 
> >         dma_sync_single_for_device(, dir)
> > 
> >         dma_unmap_single(, dir)
> > 
> > In the case of a DMA-incoherent architecture, the operations done at each
> > stage depend on the direction argument:
> > 
> >         map             for_cpu         for_device      unmap
> > TO_DEV  writeback       none            writeback       none
> > TO_CPU  invalidate      invalidate*     invalidate      invalidate*
> > BIDIR   writeback       invalidate      writeback       invalidate
> > 
> > * - only necessary if the CPU speculatively prefetches.
> 
> I think invalidation of DMA buffer is required on for_cpu(TO_CPU) even
> if CPU doesn't prefetch - what if we reuse the same buffer for multiple
> reads from DMA device?

That's fine - for non-coherent DMA, the CPU caches will only end up
containing data for that memory if:
- the CPU speculatively fetches data from that memory, or
- the CPU explicitly touches that memory

> > The multiple invalidations for the TO_CPU case handle different
> > conditions that can result in data corruption, and for some CPUs, all
> > four are necessary.
> 
> I would agree that map()/unmap() are quite special cases and so,
> depending on direction, we need to execute in them either the for_cpu()
> or for_device() call-backs.
> 
> As for invalidation in case of for_device(TO_CPU) I still don't see
> a rationale behind it. Would be interesting to see a real example where
> we benefit from this.

Yes, you could avoid that, but depending how you structure the
architecture implementation, it can turn out to be a corner case.

The above table is precisely how 32-bit ARM is implemented, because
the way we implement them is based on who owns the memory - the "map"
and "for_device" operations translate internally to a cpu-to-device
ownership transition of the buffer.  Similar for "unmap" and "to_cpu".
It basically avoids having to add additional functions at the lower
implementation levels.

> > Things get more interesting if the implementation behind the DMA API has
> > to copy data between the buffer supplied to the mapping and some DMA
> > accessible buffer:
> > 
> >         map             for_cpu         for_device      unmap
> > TO_DEV  copy to dma     none            copy to dma     none
> > TO_CPU  none            copy to cpu     none            copy to cpu
> > BIDIR   copy to dma     copy to cpu     copy to dma     copy to cpu
> > 
> > So, in both cases, the value of the direction argument defines what you
> > need to do in each call.
> 
> Interestingly enough, in your second table (which describes the more
> complicated case indeed) you set "none" for for_device(TO_CPU), which
> looks logical to me.
> 
> So IMHO this is what makes sense:
> ---------------------------->8-----------------------------
>         map             for_cpu         for_device      unmap
> TO_DEV  writeback       none            writeback       none
> TO_CPU  none            invalidate      none            invalidate*
> BIDIR   writeback       invalidate      writeback       invalidate*
> ---------------------------->8-----------------------------

That doesn't make sense for the TO_CPU case.  If the caches contain
dirty cache lines, and you're DMAing from the device to the system RAM,
other system activity can cause the dirty cache lines to be evicted
(written back) to memory which the DMA has already overwritten.  The
result is data corruption.  So, you really can't have "none" in the
"map" case there.

Given that, the "for_cpu" case becomes dependent on whether the CPU
speculatively prefetches.
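The eviction hazard Russell describes can be spelled out as a timeline (an illustrative sketch only; start_dma_read() is a placeholder, not real API):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

void start_dma_read(struct device *dev, dma_addr_t handle);	/* placeholder */

/* Illustrative timeline of the corruption described above. */
static void eviction_hazard_sketch(struct device *dev)
{
	char *buf = kmalloc(SZ_4K, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return;

	buf[0] = 0xAA;		/* the cache line over buf[0] is now dirty */

	/*
	 * If "map" were a no-op for DMA_FROM_DEVICE, that dirty line
	 * would survive this call...
	 */
	handle = dma_map_single(dev, buf, SZ_4K, DMA_FROM_DEVICE);

	start_dma_read(dev, handle);	/* device fills RAM with fresh data */

	/*
	 * ...and while the DMA runs, unrelated cache pressure may evict
	 * the dirty line, writing the stale 0xAA over bytes the device
	 * just produced.  Cleaning/invalidating at map time closes
	 * exactly this window.
	 */
}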
On Fri, May 18, 2018 at 01:35:08PM -0700, Vineet Gupta wrote:
> On 05/18/2018 10:50 AM, Russell King - ARM Linux wrote:
> > On Fri, May 18, 2018 at 10:20:02AM -0700, Vineet Gupta wrote:
> >> I never understood the need for this direction. And if memory serves me
> >> right, at that time I was seeing twice the amount of cache flushing !
> > It's necessary.  Take a moment to think carefully about this:
> >
> > 	dma_map_single(, dir)
> >
> > 	dma_sync_single_for_cpu(, dir)
> >
> > 	dma_sync_single_for_device(, dir)
> >
> > 	dma_unmap_single(, dir)
> 
> As an aside, do these imply a state machine of sorts - does a driver
> need to always call map_single first?

Kind-of, but some drivers do omit some of the dma_sync_*() calls.
For example, if a buffer is written to, then mapped with TO_DEVICE,
and then the CPU wishes to write to it, it's fairly common that a
driver omits the dma_sync_single_for_cpu() call.  If you think about
the cases I gave and what cache operations happen, such a scenario
practically turns out to be safe.

> My original point of contention/confusion is the specific combinations
> of API and direction, specifically for_cpu(TO_DEV) and for_device(TO_CPU).

Remember that it is expected that all calls for a mapping use the same
direction argument while that mapping exists.  In other words, if you
call dma_map_single(TO_DEVICE) and then use any of the other functions,
the other functions will also use TO_DEVICE.  The DMA direction
argument describes the direction of the DMA operation being performed
on the buffer, not on the individual dma_* operation.

What isn't expected at arch level is for drivers to do:

	dma_map_single(TO_DEVICE)
	dma_sync_single_for_cpu(FROM_DEVICE)

or vice versa.

> Semantically what does dma_sync_single_for_cpu(TO_DEV) even imply for a
> non dma coherent arch?
>
> Your tables below have "none" for both, implying it is unlikely to be a
> real combination (for ARM and ARC at least).

Very little for the cases that I've stated (and as I mentioned above,
some drivers do omit the call in that case.)

> The other case, actually @dir TO_CPU, independent of for_{cpu, device}
> implies driver intends to touch it after the call, so it would invalidate
> any stray lines, unconditionally (and not just for speculative prefetch
> case).

If you don't have a CPU that speculatively prefetches, and you've already
had to invalidate the cache lines (to avoid write-backs corrupting DMA'd
data) then there's no need for the architecture to do any work at the
for_cpu(TO_CPU) case - the CPU shouldn't be touching cache lines that are
part of the buffer while it is mapped, which means a non-speculating CPU
won't pull in any cache lines without an explicit access.

Speculating CPUs are different.  The action of the speculation is to try
and guess what data the program wants to access ahead of the program
flow.  That causes the CPU to prefetch data into the cache.  The point in
the program flow that this happens is not really determinant to the
programmer.  This means that if you try to read from the DMA buffer after
the DMA operation has completed without invalidating the cache between
the DMA completing and the CPU reading, you have no guarantee that you're
reading the data that the DMA operation has written.  The cache may have
loaded itself with data before the DMA operation completed, and the CPU
may see that stale data.
The difference between non-speculating CPUs and speculating CPUs is that
for non-speculating CPUs, caches work according to explicit accesses by
the program, and the program is stalled while the data is fetched from
external memory.  Speculating CPUs try to predict ahead of time what
data the program will require in the future, and attempt to load that
data into the caches _before_ the program requires it - which means that
the program suffers fewer stalls.

> > In the case of a DMA-incoherent architecture, the operations done at each
> > stage depend on the direction argument:
> >
> >         map             for_cpu         for_device      unmap
> > TO_DEV  writeback       none            writeback       none
> > TO_CPU  invalidate      invalidate*     invalidate      invalidate*
> > BIDIR   writeback       invalidate      writeback       invalidate
> >
> > * - only necessary if the CPU speculatively prefetches.
> >
> > The multiple invalidations for the TO_CPU case handle different
> > conditions that can result in data corruption, and for some CPUs, all
> > four are necessary.
> 
> Can you please explain in some more detail, TO_CPU row, why invalidate is
> conditional sometimes.

See above - I hope my explanation above is sufficient.
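Russell's "same direction for the lifetime of the mapping" rule looks like this in a driver - a sketch, where the device-specific start_dma_read()/wait_dma_done() calls are placeholders:

#include <linux/dma-mapping.h>

void start_dma_read(struct device *dev, dma_addr_t handle, size_t len);	/* placeholder */
void wait_dma_done(struct device *dev);					/* placeholder */

/* Sketch of the expected call pattern: one direction per mapping. */
static void rx_buffer_lifecycle(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	start_dma_read(dev, handle, len);
	wait_dma_done(dev);

	/* Same direction on every call for this mapping: */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the CPU may now look at buf ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the device may DMA into buf again ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}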
On Fri, May 18, 2018 at 10:05:51PM +0200, Helge Deller wrote:
> This patch seems to fix the dma issues I faced on my 32bit B160L parisc box.
> 
> So it leaves only one open issue on parisc:
> Now every 32 bit parisc system is unnecessarily non-coherent.

I disagree with those comments, let me resend the refactored patch
to make it more clear.
diff --git a/MAINTAINERS b/MAINTAINERS
index 79bb02ff812f..08d0d15d4958 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4334,12 +4334,14 @@ W:	http://git.infradead.org/users/hch/dma-mapping.git
 S:	Supported
 F:	lib/dma-debug.c
 F:	lib/dma-direct.c
+F:	lib/dma-noncoherent.c
 F:	lib/dma-virt.c
 F:	drivers/base/dma-mapping.c
 F:	drivers/base/dma-coherent.c
 F:	include/asm-generic/dma-mapping.h
 F:	include/linux/dma-direct.h
 F:	include/linux/dma-mapping.h
+F:	include/linux/dma-noncoherent.h
 
 DME1737 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792f..ad2868263867 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,16 @@
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
+	/*
+	 * Use the non-coherent ops if available.  If an architecture wants a
+	 * more fine-grained selection of operations it will have to implement
+	 * get_arch_dma_ops itself or use the per-device dma_ops.
+	 */
+#ifdef CONFIG_DMA_NONCOHERENT_OPS
+	return &dma_noncoherent_ops;
+#else
 	return &dma_direct_ops;
+#endif
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 53ad6a47f513..8d9f33febde5 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
-
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 25a9a2b04f78..4be070df5fc5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,6 +136,7 @@ struct dma_map_ops {
 };
 
 extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_noncoherent_ops;
 extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
new file mode 100644
index 000000000000..10b2654d549b
--- /dev/null
+++ b/include/linux/dma-noncoherent.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_NONCOHERENT_H
+#define _LINUX_DMA_NONCOHERENT_H 1
+
+#include <linux/dma-mapping.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_DMA_NONCOHERENT_MMAP
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+#else
+#define arch_dma_mmap NULL
+#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
+
+#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction direction);
+#else
+#define arch_dma_cache_sync NULL
+#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(struct device *dev,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(struct device *dev,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c4e9d0ce5d1..7a913937888b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -441,10 +441,30 @@ config ARCH_DMA_ADDR_T_64BIT
 config IOMMU_HELPER
 	bool
 
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+	bool
+	select NEED_DMA_MAP_STATE
+
 config DMA_DIRECT_OPS
 	bool
 	depends on HAS_DMA
 
+config DMA_NONCOHERENT_OPS
+	bool
+	depends on HAS_DMA
+	select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+	bool
+	depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+	bool
+	depends on DMA_NONCOHERENT_OPS
+
 config DMA_VIRT_OPS
 	bool
 	depends on HAS_DMA
diff --git a/lib/Makefile b/lib/Makefile
index 94203b5eecd4..9f18c8152281 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
+lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index df9e726e0712..b824eb218782 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -128,7 +128,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 	free_pages((unsigned long)cpu_addr, page_order);
 }
 
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
@@ -139,8 +139,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	return dma_addr;
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -175,7 +175,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr == DIRECT_MAPPING_ERROR;
 }
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
new file mode 100644
index 000000000000..a2c192b3508d
--- /dev/null
+++ b/lib/dma-noncoherent.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	dma_addr_t addr;
+
+	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(dev, page_to_phys(page), size, dir);
+	return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+	return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+	.alloc			= arch_dma_alloc,
+	.free			= arch_dma_free,
+	.mmap			= arch_dma_mmap,
+	.sync_single_for_device	= dma_noncoherent_sync_single_for_device,
+	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
+	.map_page		= dma_noncoherent_map_page,
+	.map_sg			= dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
+	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
+	.unmap_page		= dma_noncoherent_unmap_page,
+	.unmap_sg		= dma_noncoherent_unmap_sg,
+#endif
+	.dma_supported		= dma_direct_supported,
+	.mapping_error		= dma_direct_mapping_error,
+	.cache_sync		= arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
Add a new dma_map_ops implementation that uses dma-direct for the
address mapping of streaming mappings, and which requires arch-specific
implementations of coherent allocate/free.

Architectures have to provide flushing helpers for ownership transfers
to the device and/or CPU, and can provide optional implementations of
the coherent mmap functionality, and the cache_sync routines for
non-coherent long term allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 MAINTAINERS                       |   2 +
 include/asm-generic/dma-mapping.h |   9 +++
 include/linux/dma-direct.h        |   7 ++-
 include/linux/dma-mapping.h       |   1 +
 include/linux/dma-noncoherent.h   |  47 ++++++++++++++
 lib/Kconfig                       |  20 ++++++
 lib/Makefile                      |   1 +
 lib/dma-direct.c                  |   8 +--
 lib/dma-noncoherent.c             | 101 ++++++++++++++++++++++++++++++
 9 files changed, 191 insertions(+), 5 deletions(-)
 create mode 100644 include/linux/dma-noncoherent.h
 create mode 100644 lib/dma-noncoherent.c
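As a usage illustration: an architecture opting into these ops selects DMA_NONCOHERENT_OPS (plus the ARCH_HAS_SYNC_DMA_FOR_* symbols) in its Kconfig and then only has to supply the hooks below. This is a hypothetical sketch, not part of the patch; my_cache_wback()/my_cache_inv() stand in for the architecture's cache primitives:

/* Hypothetical arch glue (sketch, not part of this patch). */
#include <linux/dma-noncoherent.h>

void my_cache_wback(phys_addr_t paddr, size_t size);	/* placeholder */
void my_cache_inv(phys_addr_t paddr, size_t size);	/* placeholder */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	my_cache_wback(paddr, size);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	my_cache_inv(paddr, size);
}

/*
 * arch_dma_alloc()/arch_dma_free() must also be provided to hand out
 * uncached memory for coherent allocations; omitted here for brevity.
 */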